Dataset schema:
- hexsha: string (length 40)
- size: int64 (2 to 1.02M)
- ext: string (10 classes)
- lang: string (1 class)
- max_stars_repo_path: string (length 4 to 245)
- max_stars_repo_name: string (length 6 to 130)
- max_stars_repo_head_hexsha: string (length 40)
- max_stars_repo_licenses: list (length 1 to 10)
- max_stars_count: int64 (1 to 191k, nullable)
- max_stars_repo_stars_event_min_datetime: string (length 24, nullable)
- max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
- max_issues_repo_path: string (length 4 to 245)
- max_issues_repo_name: string (length 6 to 130)
- max_issues_repo_head_hexsha: string (length 40)
- max_issues_repo_licenses: list (length 1 to 10)
- max_issues_count: int64 (1 to 67k, nullable)
- max_issues_repo_issues_event_min_datetime: string (length 24, nullable)
- max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
- max_forks_repo_path: string (length 4 to 245)
- max_forks_repo_name: string (length 6 to 130)
- max_forks_repo_head_hexsha: string (length 40)
- max_forks_repo_licenses: list (length 1 to 10)
- max_forks_count: int64 (1 to 105k, nullable)
- max_forks_repo_forks_event_min_datetime: string (length 24, nullable)
- max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
- content: string (length 2 to 1.02M)
- avg_line_length: float64 (1 to 417k)
- max_line_length: int64 (1 to 987k)
- alphanum_fraction: float64 (0 to 1)
- content_no_comment: string (length 0 to 1.01M)
- is_comment_constant_removed: bool (1 class)
- is_sharp_comment_removed: bool (1 class)
hexsha: f71f0bf286ca8ce49bc5c297a075cd493190c592 | size: 2,108 | ext: py | lang: Python
max_stars: repo_path lib/galaxy_test/api/test_display_applications.py | repo_name rhpvorderman/galaxy | head_hexsha 178015f8eff0b0c7a59c0d6756658f6428222837 | licenses ["CC-BY-3.0"] | stars_count 1,085 | stars_event 2015-02-18T16:14:38.000Z to 2022-03-30T23:52:07.000Z
max_issues: repo_path lib/galaxy_test/api/test_display_applications.py | repo_name rhpvorderman/galaxy | head_hexsha 178015f8eff0b0c7a59c0d6756658f6428222837 | licenses ["CC-BY-3.0"] | issues_count 11,253 | issues_event 2015-02-18T17:47:32.000Z to 2022-03-31T21:47:03.000Z
max_forks: repo_path lib/galaxy_test/api/test_display_applications.py | repo_name rhpvorderman/galaxy | head_hexsha 178015f8eff0b0c7a59c0d6756658f6428222837 | licenses ["CC-BY-3.0"] | forks_count 1,000 | forks_event 2015-02-18T16:18:10.000Z to 2022-03-29T08:22:56.000Z
content:
import random
from typing import List
from ._framework import ApiTestCase
class DisplayApplicationsApiTestCase(ApiTestCase):
def test_index(self):
response = self._get("display_applications")
self._assert_status_code_is(response, 200)
as_list = response.json()
assert isinstance(as_list, list)
assert len(as_list) > 0
for display_app in as_list:
self._assert_has_keys(display_app, "id", "name", "version", "filename_", "links")
def test_reload_as_admin(self):
response = self._post("display_applications/reload", admin=True)
self._assert_status_code_is(response, 200)
def test_reload_with_some_ids(self):
response = self._get("display_applications")
self._assert_status_code_is(response, 200)
display_apps = response.json()
all_ids = [display_app["id"] for display_app in display_apps]
input_ids = self._get_half_random_items(all_ids)
payload = {'ids': input_ids}
response = self._post("display_applications/reload", payload, admin=True)
self._assert_status_code_is(response, 200)
reloaded = response.json()["reloaded"]
assert len(reloaded) == len(input_ids)
assert all(elem in reloaded for elem in input_ids)
    def test_reload_unknown_returns_as_failed(self):
unknown_id = "unknown"
payload = {'ids': [unknown_id]}
response = self._post("display_applications/reload", payload, admin=True)
self._assert_status_code_is(response, 200)
reloaded = response.json()["reloaded"]
failed = response.json()["failed"]
assert len(reloaded) == 0
assert len(failed) == 1
assert unknown_id in failed
def test_reload_as_non_admin_returns_403(self):
response = self._post("display_applications/reload")
self._assert_status_code_is(response, 403)
def _get_half_random_items(self, collection: List[str]) -> List[str]:
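        # Sample a random half of the given ids so reload is exercised with only a subset of the apps.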
half_num_items = int(len(collection) / 2)
rval = random.sample(collection, half_num_items)
return rval
avg_line_length: 39.037037 | max_line_length: 93 | alphanum_fraction: 0.680266
content_no_comment: same code as the content above with comments and docstrings stripped
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f71f0c081079515df5975201c3eb0b6fb8e937f3 | size: 20,739 | ext: py | lang: Python
max_stars: repo_path dask/tests/test_distributed.py | repo_name mmccarty/dask | head_hexsha 5602876f3389d039aba0d1a860922777843dbcb9 | licenses ["BSD-3-Clause"] | stars_count null | stars_event null to null
max_issues: repo_path dask/tests/test_distributed.py | repo_name mmccarty/dask | head_hexsha 5602876f3389d039aba0d1a860922777843dbcb9 | licenses ["BSD-3-Clause"] | issues_count null | issues_event null to null
max_forks: repo_path dask/tests/test_distributed.py | repo_name mmccarty/dask | head_hexsha 5602876f3389d039aba0d1a860922777843dbcb9 | licenses ["BSD-3-Clause"] | forks_count null | forks_event null to null
content:
import pytest
distributed = pytest.importorskip("distributed")
import asyncio
import os
from functools import partial
from operator import add
from distributed.utils_test import client as c # noqa F401
from distributed.utils_test import cluster_fixture # noqa F401
from distributed.utils_test import loop # noqa F401
from distributed.utils_test import cluster, gen_cluster, inc, varying
import dask
import dask.bag as db
from dask import compute, delayed, persist
from dask.delayed import Delayed
from dask.distributed import futures_of, wait
from dask.highlevelgraph import HighLevelGraph, MaterializedLayer
from dask.utils import get_named_args, tmpdir, tmpfile
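# Disable scheduler/worker state validation in the test fixtures when
# ``gen_cluster``/``cluster`` accept the ``should_check_state`` argument.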
if "should_check_state" in get_named_args(gen_cluster):
gen_cluster = partial(gen_cluster, should_check_state=False)
cluster = partial(cluster, should_check_state=False)
def test_can_import_client():
from dask.distributed import Client # noqa: F401
def test_can_import_nested_things():
from dask.distributed.protocol import dumps # noqa: F401
@gen_cluster(client=True)
async def test_persist(c, s, a, b):
x = delayed(inc)(1)
(x2,) = persist(x)
await wait(x2)
assert x2.key in a.data or x2.key in b.data
y = delayed(inc)(10)
y2, one = persist(y, 1)
await wait(y2)
assert y2.key in a.data or y2.key in b.data
def test_persist_nested(c):
a = delayed(1) + 5
b = a + 1
c = a + 2
result = persist({"a": a, "b": [1, 2, b]}, (c, 2), 4, [5])
assert isinstance(result[0]["a"], Delayed)
assert isinstance(result[0]["b"][2], Delayed)
assert isinstance(result[1][0], Delayed)
sol = ({"a": 6, "b": [1, 2, 7]}, (8, 2), 4, [5])
assert compute(*result) == sol
res = persist([a, b], c, 4, [5], traverse=False)
assert res[0][0] is a
assert res[0][1] is b
assert res[1].compute() == 8
assert res[2:] == (4, [5])
def test_futures_to_delayed_dataframe(c):
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
df = pd.DataFrame({"x": [1, 2, 3]})
futures = c.scatter([df, df])
ddf = dd.from_delayed(futures)
dd.utils.assert_eq(ddf.compute(), pd.concat([df, df], axis=0))
with pytest.raises(TypeError):
ddf = dd.from_delayed([1, 2])
@pytest.mark.parametrize("fuse", [True, False])
def test_fused_blockwise_dataframe_merge(c, fuse):
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
# Generate two DataFrames with more partitions than
# the `max_branch` default used for shuffling (32).
# We need a multi-stage shuffle to cover #7178 fix.
size = 35
df1 = pd.DataFrame({"x": range(size), "y": range(size)})
df2 = pd.DataFrame({"x": range(size), "z": range(size)})
ddf1 = dd.from_pandas(df1, npartitions=size) + 10
ddf2 = dd.from_pandas(df2, npartitions=5) + 10
df1 += 10
df2 += 10
with dask.config.set({"optimization.fuse.active": fuse}):
ddfm = ddf1.merge(ddf2, on=["x"], how="left")
ddfm.head() # https://github.com/dask/dask/issues/7178
dfm = ddfm.compute().sort_values("x")
# We call compute above since `sort_values` is not
# supported in `dask.dataframe`
dd.utils.assert_eq(
dfm, df1.merge(df2, on=["x"], how="left").sort_values("x"), check_index=False
)
def test_futures_to_delayed_bag(c):
L = [1, 2, 3]
futures = c.scatter([L, L])
b = db.from_delayed(futures)
assert list(b) == L + L
def test_futures_to_delayed_array(c):
da = pytest.importorskip("dask.array")
from dask.array.utils import assert_eq
np = pytest.importorskip("numpy")
x = np.arange(5)
futures = c.scatter([x, x])
A = da.concatenate(
[da.from_delayed(f, shape=x.shape, dtype=x.dtype) for f in futures], axis=0
)
assert_eq(A.compute(), np.concatenate([x, x], axis=0))
@gen_cluster(client=True)
async def test_local_get_with_distributed_active(c, s, a, b):
with dask.config.set(scheduler="sync"):
x = delayed(inc)(1).persist()
await asyncio.sleep(0.01)
assert not s.tasks # scheduler hasn't done anything
x = delayed(inc)(2).persist(scheduler="sync") # noqa F841
await asyncio.sleep(0.01)
assert not s.tasks # scheduler hasn't done anything
def test_to_hdf_distributed(c):
pytest.importorskip("numpy")
pytest.importorskip("pandas")
from ..dataframe.io.tests.test_hdf import test_to_hdf
test_to_hdf()
@pytest.mark.parametrize(
"npartitions",
[
1,
pytest.param(
4,
marks=pytest.mark.xfail(reason="HDF not multi-process safe", strict=False),
),
pytest.param(
10,
marks=pytest.mark.xfail(reason="HDF not multi-process safe", strict=False),
),
],
)
def test_to_hdf_scheduler_distributed(npartitions, c):
pytest.importorskip("numpy")
pytest.importorskip("pandas")
from ..dataframe.io.tests.test_hdf import test_to_hdf_schedulers
test_to_hdf_schedulers(None, npartitions)
@gen_cluster(client=True)
async def test_serializable_groupby_agg(c, s, a, b):
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [1, 0, 1, 0]})
ddf = dd.from_pandas(df, npartitions=2)
result = ddf.groupby("y").agg("count", split_out=2)
# Check Culling and Compute
agg0 = await c.compute(result.partitions[0])
agg1 = await c.compute(result.partitions[1])
dd.utils.assert_eq(
pd.concat([agg0, agg1]),
pd.DataFrame({"x": [2, 2], "y": [0, 1]}).set_index("y"),
)
def test_futures_in_graph(c):
x, y = delayed(1), delayed(2)
xx = delayed(add)(x, x)
yy = delayed(add)(y, y)
xxyy = delayed(add)(xx, yy)
xxyy2 = c.persist(xxyy)
xxyy3 = delayed(add)(xxyy2, 10)
assert xxyy3.compute(scheduler="dask.distributed") == ((1 + 1) + (2 + 2)) + 10
def test_zarr_distributed_roundtrip(c):
da = pytest.importorskip("dask.array")
pytest.importorskip("zarr")
with tmpdir() as d:
a = da.zeros((3, 3), chunks=(1, 1))
a.to_zarr(d)
a2 = da.from_zarr(d)
da.assert_eq(a, a2, scheduler=c)
assert a2.chunks == a.chunks
def test_zarr_in_memory_distributed_err(c):
da = pytest.importorskip("dask.array")
zarr = pytest.importorskip("zarr")
chunks = (1, 1)
a = da.ones((3, 3), chunks=chunks)
z = zarr.zeros_like(a, chunks=chunks)
with pytest.raises(RuntimeError):
a.to_zarr(z)
def test_scheduler_equals_client(c):
x = delayed(lambda: 1)()
assert x.compute(scheduler=c) == 1
assert c.run_on_scheduler(lambda dask_scheduler: dask_scheduler.story(x.key))
@gen_cluster(client=True)
async def test_await(c, s, a, b):
x = dask.delayed(inc)(1)
x = await x.persist()
assert x.key in s.tasks
assert a.data or b.data
assert all(f.done() for f in futures_of(x))
def test_local_scheduler():
async def f():
x = dask.delayed(inc)(1)
y = x + 1
z = await y.persist()
assert len(z.dask) == 1
asyncio.get_event_loop().run_until_complete(f())
@gen_cluster(client=True)
async def test_annotations_blockwise_unpack(c, s, a, b):
da = pytest.importorskip("dask.array")
np = pytest.importorskip("numpy")
from dask.array.utils import assert_eq
# A flaky doubling function -- need extra args because it is called before
# application to establish dtype/meta.
scale = varying([ZeroDivisionError("one"), ZeroDivisionError("two"), 2, 2])
def flaky_double(x):
return scale() * x
# A reliable double function.
def reliable_double(x):
return 2 * x
x = da.ones(10, chunks=(5,))
# The later annotations should not override the earlier annotations
with dask.annotate(retries=2):
y = x.map_blocks(flaky_double, meta=np.array((), dtype=np.float_))
with dask.annotate(retries=0):
z = y.map_blocks(reliable_double, meta=np.array((), dtype=np.float_))
with dask.config.set(optimization__fuse__active=False):
z = await c.compute(z)
assert_eq(z, np.ones(10) * 4.0)
@pytest.mark.parametrize(
"io",
[
"ones",
"zeros",
"full",
],
)
@pytest.mark.parametrize("fuse", [True, False, None])
def test_blockwise_array_creation(c, io, fuse):
np = pytest.importorskip("numpy")
da = pytest.importorskip("dask.array")
chunks = (5, 2)
shape = (10, 4)
if io == "ones":
darr = da.ones(shape, chunks=chunks)
narr = np.ones(shape)
elif io == "zeros":
darr = da.zeros(shape, chunks=chunks)
narr = np.zeros(shape)
elif io == "full":
darr = da.full(shape, 10, chunks=chunks)
narr = np.full(shape, 10)
darr += 2
narr += 2
with dask.config.set({"optimization.fuse.active": fuse}):
darr.compute()
dsk = dask.array.optimize(darr.dask, darr.__dask_keys__())
# dsk should be a dict unless fuse is explicitly False
assert isinstance(dsk, dict) == (fuse is not False)
da.assert_eq(darr, narr, scheduler=c)
@pytest.mark.parametrize(
"io",
["parquet-pyarrow", "parquet-fastparquet", "csv", "hdf"],
)
@pytest.mark.parametrize("fuse", [True, False, None])
@pytest.mark.parametrize("from_futures", [True, False])
def test_blockwise_dataframe_io(c, tmpdir, io, fuse, from_futures):
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
df = pd.DataFrame({"x": [1, 2, 3] * 5, "y": range(15)})
if from_futures:
parts = [df.iloc[:5], df.iloc[5:10], df.iloc[10:15]]
futs = c.scatter(parts)
ddf0 = dd.from_delayed(futs, meta=parts[0])
else:
ddf0 = dd.from_pandas(df, npartitions=3)
if io.startswith("parquet"):
if io == "parquet-pyarrow":
pytest.importorskip("pyarrow.parquet")
engine = "pyarrow"
else:
pytest.importorskip("fastparquet")
engine = "fastparquet"
ddf0.to_parquet(str(tmpdir), engine=engine)
ddf = dd.read_parquet(str(tmpdir), engine=engine)
elif io == "csv":
ddf0.to_csv(str(tmpdir), index=False)
ddf = dd.read_csv(os.path.join(str(tmpdir), "*"))
elif io == "hdf":
pytest.importorskip("tables")
fn = str(tmpdir.join("h5"))
ddf0.to_hdf(fn, "/data*")
ddf = dd.read_hdf(fn, "/data*")
df = df[["x"]] + 10
ddf = ddf[["x"]] + 10
with dask.config.set({"optimization.fuse.active": fuse}):
ddf.compute()
dsk = dask.dataframe.optimize(ddf.dask, ddf.__dask_keys__())
# dsk should not be a dict unless fuse is explicitly True
assert isinstance(dsk, dict) == bool(fuse)
dd.assert_eq(ddf, df, check_index=False)
def test_blockwise_fusion_after_compute(c):
# See: https://github.com/dask/dask/issues/7720
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
# Simple sequence of Dask-Dataframe manipulations
df = pd.DataFrame({"x": [1, 2, 3] * 5})
series = dd.from_pandas(df, npartitions=2)["x"]
result = series < 3
# Trigger an optimization of the `series` graph
# (which `result` depends on), then compute `result`.
# This is essentially a test of `rewrite_blockwise`.
series_len = len(series)
assert series_len == 15
assert df.x[result.compute()].sum() == 15
@gen_cluster(client=True)
async def test_blockwise_numpy_args(c, s, a, b):
"""Test pack/unpack of blockwise that includes a NumPy literal argument"""
da = pytest.importorskip("dask.array")
np = pytest.importorskip("numpy")
def fn(x, dt):
assert type(dt) is np.uint16
return x.astype(dt)
arr = da.blockwise(
fn, "x", da.ones(1000), "x", np.uint16(42), None, dtype=np.uint16
)
res = await c.compute(arr.sum(), optimize_graph=False)
assert res == 1000
@gen_cluster(client=True)
async def test_blockwise_numpy_kwargs(c, s, a, b):
"""Test pack/unpack of blockwise that includes a NumPy literal keyword argument"""
da = pytest.importorskip("dask.array")
np = pytest.importorskip("numpy")
def fn(x, dt=None):
assert type(dt) is np.uint16
return x.astype(dt)
arr = da.blockwise(fn, "x", da.ones(1000), "x", dtype=np.uint16, dt=np.uint16(42))
res = await c.compute(arr.sum(), optimize_graph=False)
assert res == 1000
def test_blockwise_different_optimization(c):
# Regression test for incorrect results due to SubgraphCallable.__eq__
# not correctly handling subgraphs with the same outputs and arity but
# different internals (GH-7632). The bug is triggered by distributed
# because it uses a function cache.
da = pytest.importorskip("dask.array")
np = pytest.importorskip("numpy")
u = da.from_array(np.arange(3))
v = da.from_array(np.array([10 + 2j, 7 - 3j, 8 + 1j]))
cv = v.conj()
x = u * cv
(cv,) = dask.optimize(cv)
y = u * cv
expected = np.array([0 + 0j, 7 + 3j, 16 - 2j])
with dask.config.set({"optimization.fuse.active": False}):
x_value = x.compute()
y_value = y.compute()
np.testing.assert_equal(x_value, expected)
np.testing.assert_equal(y_value, expected)
@gen_cluster(client=True)
async def test_combo_of_layer_types(c, s, a, b):
"""Check pack/unpack of a HLG that has every type of Layers!"""
da = pytest.importorskip("dask.array")
dd = pytest.importorskip("dask.dataframe")
np = pytest.importorskip("numpy")
pd = pytest.importorskip("pandas")
def add(x, y, z, extra_arg):
return x + y + z + extra_arg
y = c.submit(lambda x: x, 2)
z = c.submit(lambda x: x, 3)
x = da.blockwise(
add,
"x",
da.zeros((3,), chunks=(1,)),
"x",
da.ones((3,), chunks=(1,)),
"x",
y,
None,
concatenate=False,
dtype=int,
extra_arg=z,
)
df = dd.from_pandas(pd.DataFrame({"a": np.arange(3)}), npartitions=3)
df = df.shuffle("a", shuffle="tasks")
df = df["a"].to_dask_array()
res = x.sum() + df.sum()
res = await c.compute(res, optimize_graph=False)
assert res == 21
def test_blockwise_concatenate(c):
"""Test a blockwise operation with concatenated axes"""
da = pytest.importorskip("dask.array")
np = pytest.importorskip("numpy")
def f(x, y):
da.assert_eq(y, [[0, 1, 2]])
return x
x = da.from_array(np.array([0, 1, 2]))
y = da.from_array(np.array([[0, 1, 2]]))
z = da.blockwise(
f,
("i"),
x,
("i"),
y,
("ij"),
dtype=x.dtype,
concatenate=True,
)
c.compute(z, optimize_graph=False)
da.assert_eq(z, x, scheduler=c)
@gen_cluster(client=True)
async def test_map_partitions_partition_info(c, s, a, b):
dd = pytest.importorskip("dask.dataframe")
pd = pytest.importorskip("pandas")
ddf = dd.from_pandas(pd.DataFrame({"a": range(10)}), npartitions=2)
res = await c.compute(
ddf.map_partitions(lambda x, partition_info=None: partition_info)
)
assert res[0] == {"number": 0, "division": 0}
assert res[1] == {"number": 1, "division": 5}
@gen_cluster(client=True)
async def test_futures_in_subgraphs(c, s, a, b):
"""Copied from distributed (tests/test_client.py)"""
dd = pytest.importorskip("dask.dataframe")
pd = pytest.importorskip("pandas")
ddf = dd.from_pandas(
pd.DataFrame(
dict(
uid=range(50),
enter_time=pd.date_range(
start="2020-01-01", end="2020-09-01", periods=50, tz="UTC"
),
)
),
npartitions=1,
)
ddf = ddf[ddf.uid.isin(range(29))].persist()
ddf["day"] = ddf.enter_time.dt.day_name()
ddf = await c.submit(dd.categorical.categorize, ddf, columns=["day"], index=False)
@pytest.mark.flaky(reruns=5, reruns_delay=5)
@gen_cluster(client=True)
async def test_shuffle_priority(c, s, a, b):
pd = pytest.importorskip("pandas")
np = pytest.importorskip("numpy")
dd = pytest.importorskip("dask.dataframe")
# Test marked as "flaky" since the scheduling behavior
# is not deterministic. Note that the test is still
# very likely to fail every time if the "split" tasks
# are not prioritized correctly
df = pd.DataFrame({"a": range(1000)})
ddf = dd.from_pandas(df, npartitions=10)
ddf2 = ddf.shuffle("a", shuffle="tasks", max_branch=32)
await c.compute(ddf2)
# Parse transition log for processing tasks
log = [
eval(l[0])[0]
for l in s.transition_log
if l[1] == "processing" and "simple-shuffle-" in l[0]
]
# Make sure most "split" tasks are processing before
# any "combine" tasks begin
late_split = np.quantile(
[i for i, st in enumerate(log) if st.startswith("split")], 0.75
)
early_combine = np.quantile(
[i for i, st in enumerate(log) if st.startswith("simple")], 0.25
)
assert late_split < early_combine
@gen_cluster(client=True)
async def test_map_partitions_da_input(c, s, a, b):
"""Check that map_partitions can handle a dask array input"""
np = pytest.importorskip("numpy")
pd = pytest.importorskip("pandas")
da = pytest.importorskip("dask.array")
datasets = pytest.importorskip("dask.datasets")
def f(d, a):
assert isinstance(d, pd.DataFrame)
assert isinstance(a, np.ndarray)
return d
df = datasets.timeseries(freq="1d").persist()
arr = da.ones((1,), chunks=1).persist()
await c.compute(df.map_partitions(f, arr, meta=df._meta))
def test_map_partitions_df_input():
"""
Check that map_partitions can handle a delayed
partition of a dataframe input
"""
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
def f(d, a):
assert isinstance(d, pd.DataFrame)
assert isinstance(a, pd.DataFrame)
return d
def main():
item_df = dd.from_pandas(pd.DataFrame({"a": range(10)}), npartitions=1)
ddf = item_df.to_delayed()[0].persist()
merged_df = dd.from_pandas(pd.DataFrame({"b": range(10)}), npartitions=1)
# Notice, we include a shuffle in order to trigger a complex culling
merged_df = merged_df.shuffle(on="b")
merged_df.map_partitions(
f, ddf, meta=merged_df, enforce_metadata=False
).compute()
with distributed.LocalCluster(
scheduler_port=0,
dashboard_address=":0",
asynchronous=False,
n_workers=1,
nthreads=1,
processes=False,
) as cluster:
with distributed.Client(cluster, asynchronous=False):
main()
@gen_cluster(client=True)
async def test_annotation_pack_unpack(c, s, a, b):
hlg = HighLevelGraph({"l1": MaterializedLayer({"n": 42})}, {"l1": set()})
annotations = {"workers": ("alice",)}
packed_hlg = hlg.__dask_distributed_pack__(c, ["n"], annotations)
unpacked_hlg = HighLevelGraph.__dask_distributed_unpack__(packed_hlg)
annotations = unpacked_hlg["annotations"]
assert annotations == {"workers": {"n": ("alice",)}}
@gen_cluster(client=True)
async def test_pack_MaterializedLayer_handles_futures_in_graph_properly(c, s, a, b):
fut = c.submit(inc, 1)
hlg = HighLevelGraph(
{"l1": MaterializedLayer({"x": fut, "y": (inc, "x"), "z": (inc, "y")})},
{"l1": set()},
)
# fill hlg.key_dependencies cache. This excludes known futures, so only
# includes a subset of all dependencies. Previously if the cache was present
# the future dependencies would be missing when packed.
hlg.get_all_dependencies()
packed = hlg.__dask_distributed_pack__(c, ["z"], {})
unpacked = HighLevelGraph.__dask_distributed_unpack__(packed)
assert unpacked["deps"] == {"x": {fut.key}, "y": {fut.key}, "z": {"y"}}
@gen_cluster(client=True)
async def test_to_sql_engine_kwargs(c, s, a, b):
# https://github.com/dask/dask/issues/8738
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
pytest.importorskip("sqlalchemy")
df = pd.DataFrame({"a": range(10), "b": range(10)})
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=1)
with tmpfile() as f:
uri = f"sqlite:///{f}"
result = ddf.to_sql(
"test", uri, index=True, engine_kwargs={"echo": False}, compute=False
)
await c.compute(result)
dd.utils.assert_eq(
ddf,
dd.read_sql_table("test", uri, "index"),
check_divisions=False,
)
avg_line_length: 30.320175 | max_line_length: 87 | alphanum_fraction: 0.629105
content_no_comment: same code as the content above with comments and docstrings stripped
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f71f0ceae06e8af2d70646a2a3ec49f14128a9f0 | size: 1,508 | ext: py | lang: Python
max_stars: repo_path mongodb/assets/loader.py | repo_name Code360In/katacoda-scenarios-34 | head_hexsha b9eee8213a7fcdc5897601b745d851801a7c08b6 | licenses ["MIT"] | stars_count 1 | stars_event 2020-09-10T11:55:51.000Z to 2020-09-10T11:55:51.000Z
max_issues: repo_path mongodb/assets/loader.py | repo_name Code360In/katacoda-scenarios-34 | head_hexsha b9eee8213a7fcdc5897601b745d851801a7c08b6 | licenses ["MIT"] | issues_count 1 | issues_event 2021-06-02T01:25:23.000Z to 2021-06-02T01:25:23.000Z
max_forks: repo_path mongodb/assets/loader.py | repo_name Code360In/katacoda-scenarios-34 | head_hexsha b9eee8213a7fcdc5897601b745d851801a7c08b6 | licenses ["MIT"] | forks_count 4 | forks_event 2020-10-02T06:38:39.000Z to 2022-03-05T12:20:36.000Z
content:
"""
---------
loader.py
---------
Minimal code to store data in MongoDB
"""
import csv
import json
from datetime import datetime
from pymongo import MongoClient
def load_orders():
"""Load orders sample data"""
client = MongoClient('localhost', 27017)
orders = client["orders"]
# insert customers data
customers = orders["customers"]
with open('customers.csv') as csvfile:
customers_data = list(csv.DictReader(csvfile))
_ = customers.insert_many(customers_data)
# insert items data
items_ordered = orders["items_ordered"]
with open('items_ordered.csv') as csvfile:
items_ordered_data = list(csv.DictReader(csvfile))
_ = items_ordered.insert_many(items_ordered_data)
def load_airbnb():
"""Load AirBnB sample data"""
client = MongoClient('localhost', 27017)
airbnb = client["airbnb"]
sample_data = airbnb["sample_data"]
with open("airbnb.json", "r") as f_in:
data = json.load(f_in)
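    # Normalise MongoDB extended JSON: "$date" holds milliseconds since the epoch,
    # "$numberDecimal" wraps the decimal value as a string.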
for d in data:
for key, val in d.items():
if isinstance(val, dict):
if "$date" in val.keys():
d[key] = datetime.fromtimestamp(val["$date"] / 1000)
elif "$numberDecimal" in val.keys():
d[key] = val["$numberDecimal"]
try:
sample_data.insert(d)
except:
pass
def main():
"""The main script"""
load_airbnb()
load_orders()
if __name__ == "__main__":
main()
print("Done!")
avg_line_length: 22.507463 | max_line_length: 72 | alphanum_fraction: 0.600133
content_no_comment: same code as the content above with comments and docstrings stripped
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f71f0dfec04143cf2e7de7cae4d58180583be0c7 | size: 7,187 | ext: py | lang: Python
max_stars: repo_path directory/test/test_directory.py | repo_name michael-go/integrations-core | head_hexsha b094befc63a479e6496ad0d0c7bb340be63699fc | licenses ["BSD-3-Clause"] | stars_count null | stars_event null to null
max_issues: repo_path directory/test/test_directory.py | repo_name michael-go/integrations-core | head_hexsha b094befc63a479e6496ad0d0c7bb340be63699fc | licenses ["BSD-3-Clause"] | issues_count null | issues_event null to null
max_forks: repo_path directory/test/test_directory.py | repo_name michael-go/integrations-core | head_hexsha b094befc63a479e6496ad0d0c7bb340be63699fc | licenses ["BSD-3-Clause"] | forks_count null | forks_event null to null
content:
# (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from itertools import product
import os
import shutil
import tempfile
# 3p
from nose.plugins.attrib import attr
# project
from tests.checks.common import AgentCheckTest
@attr(requires="directory")
class DirectoryTestCase(AgentCheckTest):
CHECK_NAME = 'directory'
FILE_METRICS = [
"system.disk.directory.file.bytes",
"system.disk.directory.file.modified_sec_ago",
"system.disk.directory.file.created_sec_ago"
]
HISTOGRAM_SUFFIXES = ['count', '95percentile', 'max', 'median', 'avg']
DIRECTORY_METRICS = [i1 + "." + i2 for i1, i2 in product([
"system.disk.directory.file.bytes",
"system.disk.directory.file.modified_sec_ago",
"system.disk.directory.file.created_sec_ago"
], HISTOGRAM_SUFFIXES)]
COMMON_METRICS = [
"system.disk.directory.files",
"system.disk.directory.bytes"
]
@staticmethod
def get_config_stubs(dir_name, filegauges=False):
"""
Helper to generate configs from a directory name
"""
return [
{
'directory': dir_name,
'filegauges': filegauges
}, {
'directory': dir_name,
'name': "my_beloved_directory",
'filegauges': filegauges
}, {
'directory': dir_name,
'dirtagname': "directory_custom_tagname",
'filegauges': filegauges
}, {
'directory': dir_name,
'filetagname': "file_custom_tagname",
'filegauges': filegauges
}, {
'directory': dir_name,
'dirtagname': "recursive_check",
'recursive': True,
'filegauges': filegauges
}, {
'directory': dir_name,
'dirtagname': "glob_pattern_check",
'pattern': "*.log",
'filegauges': filegauges
}, {
'directory': dir_name,
'dirtagname': "relative_pattern_check",
'pattern': "file_*",
'filegauges': filegauges
}
]
def setUp(self):
"""
Generate a directory with a file structure for tests
"""
self.temp_dir = tempfile.mkdtemp()
# Create 10 files
for i in xrange(0, 10):
open(self.temp_dir + "/file_" + str(i), 'a').close()
# Add 2 '.log' files
open(self.temp_dir + "/log_1.log", 'a').close()
open(self.temp_dir + "/log_2.log", 'a').close()
# Create a subfolder and generate files into it
os.makedirs(str(self.temp_dir) + "/subfolder")
# Create 5 subfiles
for i in xrange(0, 5):
open(self.temp_dir + "/subfolder" + '/file_' + str(i), 'a').close()
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_directory_metrics(self):
"""
Directory metric coverage
"""
config_stubs = self.get_config_stubs(self.temp_dir)
countonly_stubs = self.get_config_stubs(self.temp_dir)
# Try all the configurations in countonly mode as well
for stub in countonly_stubs:
stub['countonly'] = True
config = {
'instances': config_stubs + countonly_stubs
}
self.run_check(config)
for config in config_stubs:
dirtagname = config.get('dirtagname', "name")
name = config.get('name', self.temp_dir)
dir_tags = [dirtagname + ":%s" % name]
# Directory metrics
for mname in (self.DIRECTORY_METRICS + self.COMMON_METRICS):
self.assertMetric(mname, tags=dir_tags, count=1)
# 'recursive' and 'pattern' parameters
if config.get('pattern') == "*.log":
# 2 '*.log' files in 'temp_dir'
self.assertMetric("system.disk.directory.files", tags=dir_tags, count=1, value=2)
elif config.get('pattern') == "file_*":
# 10 'file_*' files in 'temp_dir'
self.assertMetric("system.disk.directory.files", tags=dir_tags, count=1, value=10)
elif config.get('recursive'):
# 12 files in 'temp_dir' + 5 files in 'tempdir/subfolder'
self.assertMetric("system.disk.directory.files", tags=dir_tags, count=1, value=17)
else:
# 12 files in 'temp_dir'
self.assertMetric("system.disk.directory.files", tags=dir_tags, count=1, value=12)
# Raises when coverage < 100%
self.coverage_report()
def test_file_metrics(self):
"""
File metric coverage
"""
config_stubs = self.get_config_stubs(self.temp_dir, filegauges=True)
config = {
'instances': config_stubs
}
self.run_check(config)
for config in config_stubs:
dirtagname = config.get('dirtagname', "name")
name = config.get('name', self.temp_dir)
filetagname = config.get('filetagname', "filename")
dir_tags = [dirtagname + ":%s" % name]
# File metrics
for mname in self.FILE_METRICS:
if config.get('pattern') != "file_*":
# 2 '*.log' files in 'temp_dir'
for i in xrange(1, 3):
file_tag = [filetagname + ":%s" % os.path.normpath(self.temp_dir + "/log_" + str(i) + ".log")]
self.assertMetric(mname, tags=dir_tags + file_tag, count=1)
if config.get('pattern') != "*.log":
# Files in 'temp_dir'
for i in xrange(0, 10):
file_tag = [filetagname + ":%s" % os.path.normpath(self.temp_dir + "/file_" + str(i))]
self.assertMetric(mname, tags=dir_tags + file_tag, count=1)
if not config.get('pattern'):
# Files in 'temp_dir/subfolder'
if config.get('recursive'):
for i in xrange(0, 5):
file_tag = [filetagname + ":%s" % os.path.normpath(self.temp_dir + "/subfolder" + "/file_" + str(i))]
self.assertMetric(mname, tags=dir_tags + file_tag, count=1)
# Common metrics
for mname in self.COMMON_METRICS:
self.assertMetric(mname, tags=dir_tags, count=1)
# Raises when coverage < 100%
self.coverage_report()
def test_non_existent_directory(self):
"""
Missing or inaccessible directory coverage.
"""
config = {'instances': [{'directory': '/non-existent/directory'}]}
self.assertRaises(Exception, lambda: self.run_check(config))
def test_non_existent_directory_ignore_missing(self):
config = {
'instances': [
{'directory': '/non-existent/directory',
'ignore_missing': True}
]
}
self.run_check(config)
avg_line_length: 34.719807 | max_line_length: 129 | alphanum_fraction: 0.542507
content_no_comment: same code as the content above with comments and docstrings stripped
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: f71f109340281960f4a1eab70708c99eb03b0e54 | size: 2,227 | ext: py | lang: Python
max_stars: repo_path app.py | repo_name kush95300/Cluster-Maker | head_hexsha 886b424df2f73890ead47091d38a6008fedc0391 | licenses ["MIT"] | stars_count null | stars_event null to null
max_issues: repo_path app.py | repo_name kush95300/Cluster-Maker | head_hexsha 886b424df2f73890ead47091d38a6008fedc0391 | licenses ["MIT"] | issues_count null | issues_event null to null
max_forks: repo_path app.py | repo_name kush95300/Cluster-Maker | head_hexsha 886b424df2f73890ead47091d38a6008fedc0391 | licenses ["MIT"] | forks_count null | forks_event null to null
content:
from flask import Flask, render_template
from flask import request
from random import randint
app = Flask("Cluster Maker")
@app.route("/")
def home():
print("cluster ")
#return "Cluster Maker"
return render_template("index.html")
@app.route("/setdefault")
def setdefaults():
return render_template("default.html")
@app.route("/selectpage")
def selectpage():
return render_template("selectpage.html")
@app.route("/createdefaultcluster")
def defaultsclustermaker():
return "Creating Default Cluster"
@app.route("/createadvancecluster")
def advanceclustermaker():
return render_template("advanceform.html")
@app.route("/defaultcost")
def defaultcost():
return "Default cost Output"
@app.route("/login")
def accountauth():
return "login Page"
@app.route("/signup")
def createaccount():
return "Signup Page"
@app.route("/findcost")
def findcost():
return render_template("cost_analyser.html")
@app.route("/chooseform")
def chooseform():
return render_template("choice.html")
@app.route("/defaultdone")
def defaultdone():
return render_template("defaultdone.html")
@app.route("/costanalysis")
def analysecost():
nn = request.args.get("nn_instance_type")
dn = request.args.get("dn_instance_type")
jt = request.args.get("jt_instance_type")
tt = request.args.get("tt_instance_type")
dnc = request.args.get("dn_count")
ttc = request.args.get("tt_count")
ebs = request.args.get("ebs")
if ebs == "yes":
size = request.args.get("ebssize")
else:
size=0
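    # Rough monthly estimate: 0.5 USD per node (data nodes + task trackers + 2 master nodes)
    # plus 0.1 USD per unit of requested EBS size; the INR figure uses a fixed rate of 73 INR per USD.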
usr_m = (int(dnc) + int(ttc) +2) * 0.5 + int(size) * 0.1
inr_m = usr_m*73
return " data : Cost Analysis {} {} {} {} {} {} {} {} <br> <br> Total Cost: {} $ or Rs {} ".format(nn,dn,dnc,jt,tt,ttc,ebs,size,usr_m,inr_m)
@app.route("/defaultform")
def defaultform():
print("Default Form")
return "Default Form"
@app.route("/advanceform")
def advanceform():
print("Advance Form")
return "Advance Form"
def sendotpmail(otp,email):
#ansible mail
print("send mail")
@app.route("/loginotp")
def getotp():
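    # Generate a 6-digit OTP and mail it to the address passed as the "email" query parameter.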
otp=randint(100000,999999)
email = request.args.get("email")
sendotpmail(otp,email)
return "ok" | 21.621359 | 145 | 0.662775 | from flask import Flask, render_template
from flask import request
from random import randint
app = Flask("Cluster Maker")
@app.route("/")
def home():
print("cluster ")
return render_template("index.html")
@app.route("/setdefault")
def setdefaults():
return render_template("default.html")
@app.route("/selectpage")
def slectpage():
return render_template("selectpage.html")
@app.route("/createdefaultcluster")
def defaultsclustermaker():
return "Creating Default Cluster"
@app.route("/createadvancecluster")
def advanceclustermaker():
return render_template("advanceform.html")
@app.route("/defaultcost")
def defaultcost():
return "Default cost Output"
@app.route("/login")
def accountauth():
return "login Page"
@app.route("/signup")
def createaccount():
return "Signup Page"
@app.route("/findcost")
def findcost():
return render_template("cost_analyser.html")
@app.route("/chooseform")
def chooseform():
return render_template("choice.html")
@app.route("/defaultdone")
def defaultdone():
return render_template("defaultdone.html")
@app.route("/costanalysis")
def analysecost():
nn = request.args.get("nn_instance_type")
dn = request.args.get("dn_instance_type")
jt = request.args.get("jt_instance_type")
tt = request.args.get("tt_instance_type")
dnc = request.args.get("dn_count")
ttc = request.args.get("tt_count")
ebs = request.args.get("ebs")
if ebs == "yes":
size = request.args.get("ebssize")
else:
size=0
usr_m = (int(dnc) + int(ttc) +2) * 0.5 + int(size) * 0.1
inr_m = usr_m*73
return " data : Cost Analysis {} {} {} {} {} {} {} {} <br> <br> Total Cost: {} $ or Rs {} ".format(nn,dn,dnc,jt,tt,ttc,ebs,size,usr_m,inr_m)
@app.route("/defaultform")
def defaultform():
print("Default Form")
return "Default Form"
@app.route("/advanceform")
def advanceform():
print("Advance Form")
return "Advance Form"
def sendotpmail(otp,email):
print("send mail")
@app.route("/loginotp")
def getotp():
otp=randint(100000,999999)
email = request.args.get("email")
sendotpmail(otp,email)
return "ok" | true | true |
f71f11c73d6da48e9a1284e15653009b6016156b | 2,334 | py | Python | gcloud/apigw/views/get_task_node_data.py | wkma/bk-sops | 8fb5609c0c4495c28d588fbafa9d9f5f2976929b | [
"Apache-2.0"
] | 881 | 2019-03-25T02:45:42.000Z | 2022-03-30T09:10:49.000Z | gcloud/apigw/views/get_task_node_data.py | wkma/bk-sops | 8fb5609c0c4495c28d588fbafa9d9f5f2976929b | [
"Apache-2.0"
] | 3,303 | 2019-03-25T04:18:03.000Z | 2022-03-31T11:52:03.000Z | gcloud/apigw/views/get_task_node_data.py | wkma/bk-sops | 8fb5609c0c4495c28d588fbafa9d9f5f2976929b | [
"Apache-2.0"
] | 395 | 2019-03-25T02:53:36.000Z | 2022-03-31T08:37:28.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import ujson as json
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_GET
from blueapps.account.decorators import login_exempt
from gcloud import err_code
from gcloud.apigw.decorators import mark_request_whether_is_trust
from gcloud.apigw.decorators import project_inject
from gcloud.taskflow3.models import TaskFlowInstance
from gcloud.iam_auth.intercept import iam_intercept
from gcloud.iam_auth.view_interceptors.apigw import TaskViewInterceptor
from packages.bkoauth.decorators import apigw_required
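# Illustrative request sketch (hypothetical values, not part of the original
# module): the view below is reached through the API gateway with task_id and
# project_id taken from the URL and the node details passed as query
# parameters, e.g. ?node_id=node123&component_code=some_component&loop=1&
# subprocess_stack=[] -- subprocess_stack must be a JSON array, otherwise the
# REQUEST_PARAM_INVALID error code is returned.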
@login_exempt
@csrf_exempt
@require_GET
@apigw_required
@mark_request_whether_is_trust
@project_inject
@iam_intercept(TaskViewInterceptor())
def get_task_node_data(request, task_id, project_id):
project = request.project
task = TaskFlowInstance.objects.get(id=task_id, project_id=project.id)
node_id = request.GET.get("node_id")
component_code = request.GET.get("component_code")
loop = request.GET.get("loop")
try:
subprocess_stack = json.loads(request.GET.get("subprocess_stack", "[]"))
except Exception:
return {
"result": False,
"message": "subprocess_stack is not a valid array json",
"code": err_code.REQUEST_PARAM_INVALID.code,
}
data = task.get_node_data(node_id, request.user.username, component_code, subprocess_stack, loop)
return {
"result": data["result"],
"data": data["data"],
"message": data["message"],
"code": err_code.SUCCESS.code if data["result"] else err_code.UNKNOWN_ERROR.code,
}
| 38.262295 | 115 | 0.758783 |
import ujson as json
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_GET
from blueapps.account.decorators import login_exempt
from gcloud import err_code
from gcloud.apigw.decorators import mark_request_whether_is_trust
from gcloud.apigw.decorators import project_inject
from gcloud.taskflow3.models import TaskFlowInstance
from gcloud.iam_auth.intercept import iam_intercept
from gcloud.iam_auth.view_interceptors.apigw import TaskViewInterceptor
from packages.bkoauth.decorators import apigw_required
@login_exempt
@csrf_exempt
@require_GET
@apigw_required
@mark_request_whether_is_trust
@project_inject
@iam_intercept(TaskViewInterceptor())
def get_task_node_data(request, task_id, project_id):
project = request.project
task = TaskFlowInstance.objects.get(id=task_id, project_id=project.id)
node_id = request.GET.get("node_id")
component_code = request.GET.get("component_code")
loop = request.GET.get("loop")
try:
subprocess_stack = json.loads(request.GET.get("subprocess_stack", "[]"))
except Exception:
return {
"result": False,
"message": "subprocess_stack is not a valid array json",
"code": err_code.REQUEST_PARAM_INVALID.code,
}
data = task.get_node_data(node_id, request.user.username, component_code, subprocess_stack, loop)
return {
"result": data["result"],
"data": data["data"],
"message": data["message"],
"code": err_code.SUCCESS.code if data["result"] else err_code.UNKNOWN_ERROR.code,
}
| true | true |
f71f11e951868b67b7b2bbf023a64e9a2659dfc5 | 10,076 | py | Python | qiskit/circuit/gate.py | ntgiwsvp/qiskit-terra | 206b8bcc930817d88f8244f7b984880aecde959d | [
"Apache-2.0"
] | null | null | null | qiskit/circuit/gate.py | ntgiwsvp/qiskit-terra | 206b8bcc930817d88f8244f7b984880aecde959d | [
"Apache-2.0"
] | null | null | null | qiskit/circuit/gate.py | ntgiwsvp/qiskit-terra | 206b8bcc930817d88f8244f7b984880aecde959d | [
"Apache-2.0"
] | null | null | null | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Unitary gate."""
from warnings import warn
from typing import List, Optional, Union, Tuple
import numpy as np
from scipy.linalg import schur
from qiskit.circuit.parameter import ParameterExpression
from qiskit.circuit.exceptions import CircuitError
from .instruction import Instruction
class Gate(Instruction):
"""Unitary gate."""
def __init__(self, name: str, num_qubits: int, params: List,
label: Optional[str] = None) -> None:
"""Create a new gate.
Args:
name: The Qobj name of the gate.
num_qubits: The number of qubits the gate acts on.
params: A list of parameters.
label: An optional label for the gate.
"""
self._label = label
self.definition = None
super().__init__(name, num_qubits, 0, params)
# Set higher priority than Numpy array and matrix classes
__array_priority__ = 20
def to_matrix(self) -> np.ndarray:
"""Return a Numpy.array for the gate unitary matrix.
Returns:
np.ndarray: if the Gate subclass has a matrix definition.
Raises:
CircuitError: If a Gate subclass does not implement this method an
exception will be raised when this base class method is called.
"""
if hasattr(self, '__array__'):
# pylint: disable=no-member
return self.__array__(dtype=complex)
raise CircuitError("to_matrix not defined for this {}".format(type(self)))
def power(self, exponent: float):
"""Creates a unitary gate as `gate^exponent`.
Args:
exponent (float): Gate^exponent
Returns:
qiskit.extensions.UnitaryGate: To which `to_matrix` is self.to_matrix^exponent.
Raises:
CircuitError: If Gate is not unitary
"""
from qiskit.quantum_info.operators import Operator # pylint: disable=cyclic-import
from qiskit.extensions.unitary import UnitaryGate # pylint: disable=cyclic-import
# Should be diagonalized because it's a unitary.
decomposition, unitary = schur(Operator(self).data, output='complex')
# Raise the diagonal entries to the specified power
decomposition_power = list()
decomposition_diagonal = decomposition.diagonal()
# assert off-diagonal are 0
if not np.allclose(np.diag(decomposition_diagonal), decomposition):
raise CircuitError('The matrix is not diagonal')
for element in decomposition_diagonal:
decomposition_power.append(pow(element, exponent))
# Then reconstruct the resulting gate.
unitary_power = unitary @ np.diag(decomposition_power) @ unitary.conj().T
return UnitaryGate(unitary_power, label='%s^%s' % (self.name, exponent))
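    # Illustrative example (not part of the original file): for the Pauli-X
    # gate the Schur form is diagonal with eigenvalues 1 and -1, so
    # XGate().power(0.5) raises them to 0.5 (principal branch, -1 -> 1j) and
    # returns a UnitaryGate labelled 'x^0.5' whose matrix is the principal
    # square root of X, i.e. 0.5 * [[1+1j, 1-1j], [1-1j, 1+1j]].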
def _return_repeat(self, exponent: float) -> 'Gate':
return Gate(name="%s*%s" % (self.name, exponent), num_qubits=self.num_qubits,
params=self.params)
def assemble(self) -> 'Instruction':
"""Assemble a QasmQobjInstruction"""
instruction = super().assemble()
if self.label:
instruction.label = self.label
return instruction
@property
def label(self) -> str:
"""Return gate label"""
return self._label
@label.setter
def label(self, name: str):
"""Set gate label to name
Args:
name (str or None): label to assign unitary
Raises:
TypeError: name is not string or None.
"""
if isinstance(name, (str, type(None))):
self._label = name
else:
raise TypeError('label expects a string or None')
def control(self, num_ctrl_qubits: Optional[int] = 1, label: Optional[str] = None,
ctrl_state: Optional[Union[int, str]] = None):
"""Return controlled version of gate. See :class:`.ControlledGate` for usage.
Args:
num_ctrl_qubits: number of controls to add to gate (default=1)
label: optional gate label
ctrl_state: The control state in decimal or as a bitstring
(e.g. '111'). If None, use 2**num_ctrl_qubits-1.
Returns:
qiskit.circuit.ControlledGate: Controlled version of gate. This default algorithm
uses num_ctrl_qubits-1 ancillae qubits so returns a gate of size
num_qubits + 2*num_ctrl_qubits - 1.
Raises:
QiskitError: unrecognized mode or invalid ctrl_state
"""
# pylint: disable=cyclic-import
from .add_control import add_control
return add_control(self, num_ctrl_qubits, label, ctrl_state)
@staticmethod
def _broadcast_single_argument(qarg: List) -> List:
"""Expands a single argument.
For example: [q[0], q[1]] -> [q[0]], [q[1]]
"""
# [q[0], q[1]] -> [q[0]]
# -> [q[1]]
for arg0 in qarg:
yield [arg0], []
@staticmethod
def _broadcast_2_arguments(qarg0: List, qarg1: List) -> List:
if len(qarg0) == len(qarg1):
# [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]]
# -> [q[1], r[1]]
for arg0, arg1 in zip(qarg0, qarg1):
yield [arg0, arg1], []
elif len(qarg0) == 1:
# [[q[0]], [r[0], r[1]]] -> [q[0], r[0]]
# -> [q[0], r[1]]
for arg1 in qarg1:
yield [qarg0[0], arg1], []
elif len(qarg1) == 1:
# [[q[0], q[1]], [r[0]]] -> [q[0], r[0]]
# -> [q[1], r[0]]
for arg0 in qarg0:
yield [arg0, qarg1[0]], []
else:
raise CircuitError('Not sure how to combine these two-qubit arguments:\n %s\n %s' %
(qarg0, qarg1))
@staticmethod
def _broadcast_3_or_more_args(qargs: List) -> List:
if all(len(qarg) == len(qargs[0]) for qarg in qargs):
for arg in zip(*qargs):
yield list(arg), []
else:
raise CircuitError(
'Not sure how to combine these qubit arguments:\n %s\n' % qargs)
def broadcast_arguments(self, qargs: List, cargs: List) -> Tuple[List, List]:
"""Validation and handling of the arguments and its relationship.
For example, ``cx([q[0],q[1]], q[2])`` means ``cx(q[0], q[2]); cx(q[1], q[2])``. This
method yields the arguments in the right grouping. In the given example::
in: [[q[0],q[1]], q[2]],[]
outs: [q[0], q[2]], []
[q[1], q[2]], []
The general broadcasting rules are:
* If len(qargs) == 1::
[q[0], q[1]] -> [q[0]],[q[1]]
* If len(qargs) == 2::
[[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]], [q[1], r[1]]
[[q[0]], [r[0], r[1]]] -> [q[0], r[0]], [q[0], r[1]]
[[q[0], q[1]], [r[0]]] -> [q[0], r[0]], [q[1], r[0]]
* If len(qargs) >= 3::
[q[0], q[1]], [r[0], r[1]], ...] -> [q[0], r[0], ...], [q[1], r[1], ...]
Args:
qargs: List of quantum bit arguments.
cargs: List of classical bit arguments.
Returns:
A tuple with single arguments.
Raises:
CircuitError: If the input is not valid. For example, the number of
arguments does not match the gate expectation.
"""
if len(qargs) != self.num_qubits or cargs:
raise CircuitError(
'The amount of qubit/clbit arguments does not match the gate expectation.')
if any([not qarg for qarg in qargs]):
raise CircuitError('One or more of the arguments are empty')
if len(qargs) == 1:
return Gate._broadcast_single_argument(qargs[0])
elif len(qargs) == 2:
return Gate._broadcast_2_arguments(qargs[0], qargs[1])
elif len(qargs) >= 3:
return Gate._broadcast_3_or_more_args(qargs)
else:
raise CircuitError('This gate cannot handle %i arguments' % len(qargs))
def validate_parameter(self, parameter):
"""Gate parameters should be int, float, or ParameterExpression"""
if isinstance(parameter, ParameterExpression):
if len(parameter.parameters) > 0:
return parameter # expression has free parameters, we cannot validate it
if not parameter._symbol_expr.is_real:
raise CircuitError("Bound parameter expression is complex in gate {}".format(
self.name))
return parameter # per default assume parameters must be real when bound
if isinstance(parameter, (int, float)):
return parameter
elif isinstance(parameter, (np.integer, np.floating)):
return parameter.item()
elif isinstance(parameter, np.ndarray):
warn("Gate param type %s is being deprecated as of 0.16.0, and will be removed "
"no earlier than 3 months after that release date. "
"Considering creating your own Gate subclass with the method validate_parameter "
" to allow this param type." % type(parameter), DeprecationWarning, 3)
return parameter
else:
raise CircuitError("Invalid param type {0} for gate {1}.".format(type(parameter),
self.name))
| 38.903475 | 98 | 0.568678 |
from warnings import warn
from typing import List, Optional, Union, Tuple
import numpy as np
from scipy.linalg import schur
from qiskit.circuit.parameter import ParameterExpression
from qiskit.circuit.exceptions import CircuitError
from .instruction import Instruction
class Gate(Instruction):
def __init__(self, name: str, num_qubits: int, params: List,
label: Optional[str] = None) -> None:
self._label = label
self.definition = None
super().__init__(name, num_qubits, 0, params)
__array_priority__ = 20
def to_matrix(self) -> np.ndarray:
if hasattr(self, '__array__'):
return self.__array__(dtype=complex)
raise CircuitError("to_matrix not defined for this {}".format(type(self)))
def power(self, exponent: float):
from qiskit.quantum_info.operators import Operator
from qiskit.extensions.unitary import UnitaryGate
decomposition, unitary = schur(Operator(self).data, output='complex')
# Raise the diagonal entries to the specified power
decomposition_power = list()
decomposition_diagonal = decomposition.diagonal()
# assert off-diagonal are 0
if not np.allclose(np.diag(decomposition_diagonal), decomposition):
raise CircuitError('The matrix is not diagonal')
for element in decomposition_diagonal:
decomposition_power.append(pow(element, exponent))
# Then reconstruct the resulting gate.
unitary_power = unitary @ np.diag(decomposition_power) @ unitary.conj().T
return UnitaryGate(unitary_power, label='%s^%s' % (self.name, exponent))
def _return_repeat(self, exponent: float) -> 'Gate':
return Gate(name="%s*%s" % (self.name, exponent), num_qubits=self.num_qubits,
params=self.params)
def assemble(self) -> 'Instruction':
instruction = super().assemble()
if self.label:
instruction.label = self.label
return instruction
@property
def label(self) -> str:
return self._label
@label.setter
def label(self, name: str):
if isinstance(name, (str, type(None))):
self._label = name
else:
raise TypeError('label expects a string or None')
def control(self, num_ctrl_qubits: Optional[int] = 1, label: Optional[str] = None,
ctrl_state: Optional[Union[int, str]] = None):
# pylint: disable=cyclic-import
from .add_control import add_control
return add_control(self, num_ctrl_qubits, label, ctrl_state)
@staticmethod
def _broadcast_single_argument(qarg: List) -> List:
# [q[0], q[1]] -> [q[0]]
# -> [q[1]]
for arg0 in qarg:
yield [arg0], []
@staticmethod
def _broadcast_2_arguments(qarg0: List, qarg1: List) -> List:
if len(qarg0) == len(qarg1):
# [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]]
# -> [q[1], r[1]]
for arg0, arg1 in zip(qarg0, qarg1):
yield [arg0, arg1], []
elif len(qarg0) == 1:
# [[q[0]], [r[0], r[1]]] -> [q[0], r[0]]
# -> [q[0], r[1]]
for arg1 in qarg1:
yield [qarg0[0], arg1], []
elif len(qarg1) == 1:
# [[q[0], q[1]], [r[0]]] -> [q[0], r[0]]
# -> [q[1], r[0]]
for arg0 in qarg0:
yield [arg0, qarg1[0]], []
else:
raise CircuitError('Not sure how to combine these two-qubit arguments:\n %s\n %s' %
(qarg0, qarg1))
@staticmethod
def _broadcast_3_or_more_args(qargs: List) -> List:
if all(len(qarg) == len(qargs[0]) for qarg in qargs):
for arg in zip(*qargs):
yield list(arg), []
else:
raise CircuitError(
'Not sure how to combine these qubit arguments:\n %s\n' % qargs)
def broadcast_arguments(self, qargs: List, cargs: List) -> Tuple[List, List]:
if len(qargs) != self.num_qubits or cargs:
raise CircuitError(
'The amount of qubit/clbit arguments does not match the gate expectation.')
if any([not qarg for qarg in qargs]):
raise CircuitError('One or more of the arguments are empty')
if len(qargs) == 1:
return Gate._broadcast_single_argument(qargs[0])
elif len(qargs) == 2:
return Gate._broadcast_2_arguments(qargs[0], qargs[1])
elif len(qargs) >= 3:
return Gate._broadcast_3_or_more_args(qargs)
else:
raise CircuitError('This gate cannot handle %i arguments' % len(qargs))
def validate_parameter(self, parameter):
if isinstance(parameter, ParameterExpression):
if len(parameter.parameters) > 0:
return parameter # expression has free parameters, we cannot validate it
if not parameter._symbol_expr.is_real:
raise CircuitError("Bound parameter expression is complex in gate {}".format(
self.name))
return parameter # per default assume parameters must be real when bound
if isinstance(parameter, (int, float)):
return parameter
elif isinstance(parameter, (np.integer, np.floating)):
return parameter.item()
elif isinstance(parameter, np.ndarray):
warn("Gate param type %s is being deprecated as of 0.16.0, and will be removed "
"no earlier than 3 months after that release date. "
"Considering creating your own Gate subclass with the method validate_parameter "
" to allow this param type." % type(parameter), DeprecationWarning, 3)
return parameter
else:
raise CircuitError("Invalid param type {0} for gate {1}.".format(type(parameter),
self.name))
| true | true |
f71f1355b6da4b3a6ca66001c03db66ddef1e71c | 95,602 | py | Python | nodes.py | elrnv/RenderManForBlender | ca6b2ce1fb8b9e4acb893dfe640067c1beaa3c36 | [
"MIT"
] | null | null | null | nodes.py | elrnv/RenderManForBlender | ca6b2ce1fb8b9e4acb893dfe640067c1beaa3c36 | [
"MIT"
] | null | null | null | nodes.py | elrnv/RenderManForBlender | ca6b2ce1fb8b9e4acb893dfe640067c1beaa3c36 | [
"MIT"
] | null | null | null | # ##### BEGIN MIT LICENSE BLOCK #####
#
# Copyright (c) 2015 - 2017 Pixar
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#
# ##### END MIT LICENSE BLOCK #####
import bpy
import _cycles
from bpy.app.handlers import persistent
import xml.etree.ElementTree as ET
import tempfile
import nodeitems_utils
import shutil
from bpy.props import *
from nodeitems_utils import NodeCategory, NodeItem
from .shader_parameters import class_generate_properties
from .shader_parameters import node_add_inputs
from .shader_parameters import node_add_outputs
from .shader_parameters import socket_map
from .shader_parameters import txmake_options, update_conditional_visops
from .util import args_files_in_path
from .util import get_path_list
from .util import rib
from .util import debug
from .util import user_path
from .util import get_real_path
from .util import readOSO
from .cycles_convert import *
from operator import attrgetter, itemgetter
import os.path
from time import sleep
import traceback
NODE_LAYOUT_SPLIT = 0.5
group_nodes = ['ShaderNodeGroup', 'NodeGroupInput', 'NodeGroupOutput']
# Default Types
# update node during ipr for a socket default_value
def update_func(self, context):
# check if this prop is set on an input
node = self.node if hasattr(self, 'node') else self
from . import engine
if engine.is_ipr_running():
engine.ipr.issue_shader_edits(node=node)
# socket name corresponds to the param on the node
class RendermanSocket:
ui_open: BoolProperty(name='UI Open', default=True)
def get_pretty_name(self, node):
if node.bl_idname in group_nodes:
return self.name
else:
return self.identifier
def get_value(self, node):
if node.bl_idname in group_nodes or not hasattr(node, self.name):
return self.default_value
else:
return getattr(node, self.name)
def draw_color(self, context, node):
return (0.25, 1.0, 0.25, 1.0)
def draw_value(self, context, layout, node):
layout.prop(node, self.identifier)
def draw(self, context, layout, node, text):
if self.is_linked or self.is_output or self.hide_value or not hasattr(self, 'default_value'):
layout.label(self.get_pretty_name(node))
elif node.bl_idname in group_nodes or node.bl_idname == "PxrOSLPatternNode":
layout.prop(self, 'default_value',
text=self.get_pretty_name(node), slider=True)
else:
layout.prop(node, self.name,
text=self.get_pretty_name(node), slider=True)
class RendermanSocketInterface:
def draw_color(self, context):
return (0.25, 1.0, 0.25, 1.0)
def draw(self, context, layout):
layout.label(self.name)
def from_socket(self, node, socket):
if hasattr(self, 'default_value'):
self.default_value = socket.get_value(node)
self.name = socket.name
def init_socket(self, node, socket, data_path):
sleep(.01)
socket.name = self.name
if hasattr(self, 'default_value'):
socket.default_value = self.default_value
# socket types (need this just for the ui_open)
class RendermanNodeSocketFloat(bpy.types.NodeSocketFloat, RendermanSocket):
'''RenderMan float input/output'''
bl_idname = 'RendermanNodeSocketFloat'
bl_label = 'RenderMan Float Socket'
default_value: FloatProperty(update=update_func)
renderman_type: StringProperty(default='float')
def draw_color(self, context, node):
return (0.5, 0.5, 0.5, 1.0)
class RendermanNodeSocketInterfaceFloat(bpy.types.NodeSocketInterfaceFloat, RendermanSocketInterface):
'''RenderMan float input/output'''
bl_idname = 'RendermanNodeSocketInterfaceFloat'
bl_label = 'RenderMan Float Socket'
bl_socket_idname = 'RendermanNodeSocketFloat'
default_value: FloatProperty()
def draw_color(self, context):
return (0.5, 0.5, 0.5, 1.0)
class RendermanNodeSocketInt(bpy.types.NodeSocketInt, RendermanSocket):
'''RenderMan int input/output'''
bl_idname = 'RendermanNodeSocketInt'
bl_label = 'RenderMan Int Socket'
default_value: IntProperty(update=update_func)
renderman_type: StringProperty(default='int')
def draw_color(self, context, node):
return (1.0, 1.0, 1.0, 1.0)
class RendermanNodeSocketInterfaceInt(bpy.types.NodeSocketInterfaceInt, RendermanSocketInterface):
'''RenderMan float input/output'''
bl_idname = 'RendermanNodeSocketInterfaceInt'
bl_label = 'RenderMan Int Socket'
bl_socket_idname = 'RendermanNodeSocketInt'
default_value: IntProperty()
def draw_color(self, context):
return (1.0, 1.0, 1.0, 1.0)
class RendermanNodeSocketString(bpy.types.NodeSocketString, RendermanSocket):
'''RenderMan string input/output'''
bl_idname = 'RendermanNodeSocketString'
bl_label = 'RenderMan String Socket'
default_value: StringProperty(update=update_func)
is_texture: BoolProperty(default=False)
renderman_type: StringProperty(default='string')
class RendermanNodeSocketStruct(bpy.types.NodeSocketString, RendermanSocket):
'''RenderMan struct input/output'''
bl_idname = 'RendermanNodeSocketStruct'
bl_label = 'RenderMan Struct Socket'
hide_value = True
renderman_type = 'string'
default_value = ''
class RendermanNodeSocketInterfaceStruct(bpy.types.NodeSocketInterfaceString, RendermanSocketInterface):
'''RenderMan struct input/output'''
bl_idname = 'RendermanNodeSocketInterfaceStruct'
bl_label = 'RenderMan Struct Socket'
bl_socket_idname = 'RendermanNodeSocketStruct'
hide_value = True
class RendermanNodeSocketColor(bpy.types.NodeSocketColor, RendermanSocket):
'''RenderMan color input/output'''
bl_idname = 'RendermanNodeSocketColor'
bl_label = 'RenderMan Color Socket'
default_value: FloatVectorProperty(size=3,
subtype="COLOR", update=update_func)
renderman_type: StringProperty(default='color')
def draw_color(self, context, node):
return (1.0, 1.0, .5, 1.0)
class RendermanNodeSocketInterfaceColor(bpy.types.NodeSocketInterfaceColor, RendermanSocketInterface):
'''RenderMan color input/output'''
bl_idname = 'RendermanNodeSocketInterfaceColor'
bl_label = 'RenderMan Color Socket'
bl_socket_idname = 'RendermanNodeSocketColor'
default_value: FloatVectorProperty(size=3,
subtype="COLOR")
def draw_color(self, context):
return (1.0, 1.0, .5, 1.0)
class RendermanNodeSocketVector(RendermanSocket, bpy.types.NodeSocketVector):
'''RenderMan vector input/output'''
bl_idname = 'RendermanNodeSocketVector'
bl_label = 'RenderMan Vector Socket'
hide_value = True
default_value: FloatVectorProperty(size=3,
subtype="EULER", update=update_func)
renderman_type: StringProperty(default='vector')
def draw_color(self, context, node):
return (.25, .25, .75, 1.0)
class RendermanNodeSocketInterfaceVector(bpy.types.NodeSocketInterfaceVector, RendermanSocketInterface):
'''RenderMan color input/output'''
bl_idname = 'RendermanNodeSocketInterfaceVector'
bl_label = 'RenderMan Vector Socket'
bl_socket_idname = 'RendermanNodeSocketVector'
hide_value = True
default_value: FloatVectorProperty(size=3,
subtype="EULER")
def draw_color(self, context):
return (.25, .25, .75, 1.0)
# Custom socket type for connecting shaders
class RendermanShaderSocket(bpy.types.NodeSocketShader, RendermanSocket):
'''RenderMan shader input/output'''
bl_idname = 'RendermanShaderSocket'
bl_label = 'RenderMan Shader Socket'
hide_value = True
# Custom socket type for connecting shaders
class RendermanShaderSocketInterface(bpy.types.NodeSocketInterfaceShader, RendermanSocketInterface):
'''RenderMan shader input/output'''
bl_idname = 'RendermanShaderInterfaceSocket'
bl_label = 'RenderMan Shader Socket'
bl_socket_idname = 'RendermanShaderSocket'
hide_value = True
# Base class for all custom nodes in this tree type.
# Defines a poll function to enable instantiation.
class RendermanShadingNode(bpy.types.ShaderNode):
bl_label = 'Output'
def update_mat(self, mat):
if self.renderman_node_type == 'bxdf' and self.outputs['Bxdf'].is_linked:
mat.specular_color = [1, 1, 1]
mat.diffuse_color = [1, 1, 1]
mat.use_transparency = False
mat.specular_intensity = 0
mat.diffuse_intensity = 1
if hasattr(self, "baseColor"):
mat.diffuse_color = self.baseColor
elif hasattr(self, "emitColor"):
mat.diffuse_color = self.emitColor
elif hasattr(self, "diffuseColor"):
mat.diffuse_color = self.diffuseColor
elif hasattr(self, "midColor"):
mat.diffuse_color = self.midColor
elif hasattr(self, "transmissionColor"):
mat.diffuse_color = self.transmissionColor
elif hasattr(self, "frontColor"):
mat.diffuse_color = self.frontColor
# specular intensity
if hasattr(self, "specular"):
mat.specular_intensity = self.specular
elif hasattr(self, "SpecularGainR"):
mat.specular_intensity = self.specularGainR
elif hasattr(self, "reflectionGain"):
mat.specular_intensity = self.reflectionGain
# specular color
if hasattr(self, "specularColor"):
mat.specular_color = self.specularColor
elif hasattr(self, "reflectionColor"):
mat.specular_color = self.reflectionColor
if self.bl_idname in ["PxrGlassBxdfNode", "PxrLMGlassBxdfNode"]:
mat.use_transparency = True
mat.alpha = .5
if self.bl_idname == "PxrLMMetalBxdfNode":
mat.diffuse_color = [0, 0, 0]
mat.specular_intensity = 1
mat.specular_color = self.specularColor
mat.mirror_color = [1, 1, 1]
elif self.bl_idname == "PxrLMPlasticBxdfNode":
mat.specular_intensity = 1
# all the properties of a shader will go here, also inputs/outputs
# on connectable props will have the same name
# node_props = None
def draw_buttons(self, context, layout):
self.draw_nonconnectable_props(context, layout, self.prop_names)
if self.bl_idname == "PxrOSLPatternNode":
layout.operator("node.refresh_osl_shader")
def draw_buttons_ext(self, context, layout):
self.draw_nonconnectable_props(context, layout, self.prop_names)
def draw_nonconnectable_props(self, context, layout, prop_names):
if self.bl_idname in ['PxrLayerPatternNode', 'PxrSurfaceBxdfNode']:
col = layout.column(align=True)
for prop_name in prop_names:
if prop_name not in self.inputs:
for name in getattr(self, prop_name):
if name.startswith('enable'):
col.prop(self, name, text=prop_name.split('.')[-1])
break
return
if self.bl_idname == "PxrOSLPatternNode" or self.bl_idname == "PxrSeExprPatternNode":
prop = getattr(self, "codetypeswitch")
layout.prop(self, "codetypeswitch")
if getattr(self, "codetypeswitch") == 'INT':
prop = getattr(self, "internalSearch")
layout.prop_search(
self, "internalSearch", bpy.data, "texts", text="")
elif getattr(self, "codetypeswitch") == 'EXT':
prop = getattr(self, "shadercode")
layout.prop(self, "shadercode")
elif getattr(self, "codetypeswitch") == 'NODE':
layout.prop(self, "expression")
else:
# temp until we can create ramps natively
if self.__annotations__['plugin_name'] == 'PxrRamp':
nt = bpy.data.node_groups[self.node_group]
if nt:
layout.template_color_ramp(
nt.nodes["ColorRamp"], 'color_ramp')
for prop_name in prop_names:
prop_meta = self.prop_meta[prop_name]
if 'widget' in prop_meta and prop_meta['widget'] == 'null' or \
'hidden' in prop_meta and prop_meta['hidden']:
continue
if prop_name not in self.inputs:
if prop_meta['renderman_type'] == 'page':
ui_prop = prop_name + "_uio"
ui_open = getattr(self, ui_prop)
icon = 'DISCLOSURE_TRI_DOWN' if ui_open \
else 'DISCLOSURE_TRI_RIGHT'
split = layout.split(NODE_LAYOUT_SPLIT)
row = split.row()
row.prop(self, ui_prop, icon=icon, text='',
icon_only=True, emboss=False, slider=True)
row.label(prop_name.split('.')[-1] + ':')
if ui_open:
prop = getattr(self, prop_name)
self.draw_nonconnectable_props(
context, layout, prop)
elif "Subset" in prop_name and prop_meta['type'] == 'string':
layout.prop_search(self, prop_name, bpy.data.scenes[0].renderman,
"object_groups")
else:
layout.prop(self, prop_name, slider=True)
def copy(self, node):
pass
# self.inputs.clear()
# self.outputs.clear()
def RefreshNodes(self, context, nodeOR=None, materialOverride=None):
        # Compile the shader. If the call came from a socket draw, get the node
        # information another way.
if hasattr(context, "node"):
node = context.node
else:
node = nodeOR
prefs = bpy.context.preferences.addons[__package__].preferences
out_path = user_path(prefs.env_vars.out)
compile_path = os.path.join(user_path(prefs.env_vars.out), "shaders")
if os.path.exists(out_path):
pass
else:
os.mkdir(out_path)
if os.path.exists(os.path.join(out_path, "shaders")):
pass
else:
os.mkdir(os.path.join(out_path, "shaders"))
if getattr(node, "codetypeswitch") == "EXT":
osl_path = user_path(getattr(node, 'shadercode'))
FileName = os.path.basename(osl_path)
FileNameNoEXT = os.path.splitext(FileName)[0]
FileNameOSO = FileNameNoEXT
FileNameOSO += ".oso"
export_path = os.path.join(
user_path(prefs.env_vars.out), "shaders", FileNameOSO)
if os.path.splitext(FileName)[1] == ".oso":
out_file = os.path.join(user_path(prefs.env_vars.out), "shaders", FileNameOSO)
if not os.path.exists(out_file) or not os.path.samefile(osl_path, out_file):
shutil.copy(osl_path, out_file)
# Assume that the user knows what they were doing when they
# compiled the osl file.
ok = True
else:
ok = node.compile_osl(osl_path, compile_path)
elif getattr(node, "codetypeswitch") == "INT" and node.internalSearch:
script = bpy.data.texts[node.internalSearch]
osl_path = bpy.path.abspath(
script.filepath, library=script.library)
if script.is_in_memory or script.is_dirty or \
script.is_modified or not os.path.exists(osl_path):
osl_file = tempfile.NamedTemporaryFile(
mode='w', suffix=".osl", delete=False)
osl_file.write(script.as_string())
osl_file.close()
FileNameNoEXT = os.path.splitext(script.name)[0]
FileNameOSO = FileNameNoEXT
FileNameOSO += ".oso"
node.__annotations__['plugin_name'] = FileNameNoEXT
ok = node.compile_osl(osl_file.name, compile_path, script.name)
export_path = os.path.join(
user_path(prefs.env_vars.out), "shaders", FileNameOSO)
os.remove(osl_file.name)
else:
ok = node.compile_osl(osl_path, compile_path)
FileName = os.path.basename(osl_path)
FileNameNoEXT = os.path.splitext(FileName)[0]
node.__annotations__['plugin_name'] = FileNameNoEXT
FileNameOSO = FileNameNoEXT
FileNameOSO += ".oso"
export_path = os.path.join(
user_path(prefs.env_vars.out), "shaders", FileNameOSO)
else:
ok = False
debug("osl", "Shader cannot be compiled. Shader name not specified")
# If Shader compiled successfully then update node.
if ok:
debug('osl', "Shader Compiled Successfully!")
# Reset the inputs and outputs
node.outputs.clear()
node.inputs.clear()
# Read in new properties
prop_names, shader_meta = readOSO(export_path)
debug('osl', prop_names, "MetaInfo: ", shader_meta)
# Set node name to shader name
node.label = shader_meta["shader"]
node.__annotations__['plugin_name'] = shader_meta["shader"]
# Generate new inputs and outputs
setattr(node, 'shader_meta', shader_meta)
node.setOslProps(prop_names, shader_meta)
else:
debug("osl", "NODE COMPILATION FAILED")
def compile_osl(self, inFile, outPath, nameOverride=""):
if not nameOverride:
FileName = os.path.basename(inFile)
FileNameNoEXT = os.path.splitext(FileName)[0]
out_file = os.path.join(outPath, FileNameNoEXT)
out_file += ".oso"
else:
FileNameNoEXT = os.path.splitext(nameOverride)[0]
out_file = os.path.join(outPath, FileNameNoEXT)
out_file += ".oso"
ok = _cycles.osl_compile(inFile, out_file)
return ok
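    # Usage sketch (illustrative paths, not part of the original file):
    # compile_osl('/tmp/noise.osl', out_shaders_dir) asks Cycles' bundled OSL
    # compiler to write '<out_shaders_dir>/noise.oso' and returns True on
    # success; nameOverride is passed when the source comes from an in-memory
    # Blender text datablock so the .oso keeps that datablock's name.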
def update(self):
debug("info", "UPDATING: ", self.name)
@classmethod
def poll(cls, ntree):
if hasattr(ntree, 'bl_idname'):
return ntree.bl_idname == 'ShaderNodeTree'
else:
return True
def setOslProps(self, prop_names, shader_meta):
for prop_name in prop_names:
prop_type = shader_meta[prop_name]["type"]
if shader_meta[prop_name]["IO"] == "out":
self.outputs.new(
socket_map[prop_type], prop_name)
else:
prop_default = shader_meta[prop_name]["default"]
if prop_type == "float":
prop_default = float(prop_default)
elif prop_type == "int":
prop_default = int(float(prop_default))
if prop_type == "matrix":
self.inputs.new(socket_map["struct"], prop_name, prop_name)
elif prop_type == "void":
pass
elif 'lockgeom' in shader_meta[prop_name] and shader_meta[prop_name]['lockgeom'] == 0:
pass
else:
input = self.inputs.new(socket_map[shader_meta[prop_name]["type"]],
prop_name, prop_name)
input.default_value = prop_default
if prop_type == 'struct' or prop_type == 'point':
input.hide_value = True
input.renderman_type = prop_type
debug('osl', "Shader: ", shader_meta["shader"], "Properties: ",
prop_names, "Shader meta data: ", shader_meta)
compileLocation = self.name + "Compile"
class RendermanOutputNode(RendermanShadingNode):
bl_label = 'RenderMan Material'
renderman_node_type = 'output'
bl_icon = 'MATERIAL'
node_tree = None
def init(self, context):
input = self.inputs.new('RendermanShaderSocket', 'Bxdf')
input.type = 'SHADER'
input.hide_value = True
input = self.inputs.new('RendermanShaderSocket', 'Light')
input.hide_value = True
input = self.inputs.new('RendermanShaderSocket', 'Displacement')
input.hide_value = True
def draw_buttons(self, context, layout):
return
def draw_buttons_ext(self, context, layout):
return
# when a connection is made or removed see if we're in IPR mode and issue
# updates
def update(self):
from . import engine
if engine.is_ipr_running():
engine.ipr.last_edit_mat = None
engine.ipr.issue_shader_edits(nt=self.id_data)
# Final output node, used as a dummy to find top level shaders
class RendermanBxdfNode(RendermanShadingNode):
bl_label = 'Bxdf'
renderman_node_type = 'bxdf'
shading_compatibility = {'NEW_SHADING'}
class RendermanDisplacementNode(RendermanShadingNode):
bl_label = 'Displacement'
renderman_node_type = 'displacement'
# Final output node, used as a dummy to find top level shaders
class RendermanPatternNode(RendermanShadingNode):
bl_label = 'Texture'
renderman_node_type = 'pattern'
bl_type = 'TEX_IMAGE'
bl_static_type = 'TEX_IMAGE'
class RendermanLightNode(RendermanShadingNode):
bl_label = 'Output'
renderman_node_type = 'light'
# Generate dynamic types
def generate_node_type(prefs, name, args):
''' Dynamically generate a node type from pattern '''
nodeType = args.find("shaderType/tag").attrib['value']
typename = '%s%sNode' % (name, nodeType.capitalize())
nodeDict = {'bxdf': RendermanBxdfNode,
'pattern': RendermanPatternNode,
'displacement': RendermanDisplacementNode,
'light': RendermanLightNode}
if nodeType not in nodeDict.keys():
return
ntype = type(typename, (nodeDict[nodeType],), {'__annotations__': {}})
ntype.bl_label = name
ntype.typename = typename
inputs = [p for p in args.findall('./param')] + \
[p for p in args.findall('./page')]
outputs = [p for p in args.findall('.//output')]
def init(self, context):
if self.renderman_node_type == 'bxdf':
self.outputs.new('RendermanShaderSocket', "Bxdf").type = 'SHADER'
#socket_template = self.socket_templates.new(identifier='Bxdf', name='Bxdf', type='SHADER')
node_add_inputs(self, name, self.prop_names)
node_add_outputs(self)
            # if this is PxrLayerSurface, set the diffuseGain to 0. The default
            # of 1 is unintuitive.
if self.__annotations__['plugin_name'] == 'PxrLayerSurface':
self.diffuseGain = 0
elif self.renderman_node_type == 'light':
# only make a few sockets connectable
node_add_inputs(self, name, self.prop_names)
self.outputs.new('RendermanShaderSocket', "Light")
elif self.renderman_node_type == 'displacement':
# only make the color connectable
self.outputs.new('RendermanShaderSocket', "Displacement")
node_add_inputs(self, name, self.prop_names)
# else pattern
elif name == "PxrOSL":
self.outputs.clear()
else:
node_add_inputs(self, name, self.prop_names)
node_add_outputs(self)
if name == "PxrRamp":
node_group = bpy.data.node_groups.new(
'PxrRamp_nodegroup', 'ShaderNodeTree')
node_group.nodes.new('ShaderNodeValToRGB')
node_group.use_fake_user = True
self.node_group = node_group.name
update_conditional_visops(self)
def free(self):
if name == "PxrRamp":
bpy.data.node_groups.remove(bpy.data.node_groups[self.node_group])
ntype.init = init
ntype.free = free
if name == 'PxrRamp':
ntype.node_group = StringProperty('color_ramp', default='')
ntype.__annotations__["plugin_name"] = StringProperty(name='Plugin Name',
default=name, options={'HIDDEN'})
    # lights can't connect to a node tree in 20.0
class_generate_properties(ntype, name, inputs + outputs)
if nodeType == 'light':
ntype.light_shading_rate = FloatProperty(
name="Light Shading Rate",
description="Shading Rate for this light. \
Leave this high unless detail is missing",
default=100.0)
ntype.light_primary_visibility = BoolProperty(
name="Light Primary Visibility",
description="Camera visibility for this light",
default=True)
bpy.utils.register_class(ntype)
return typename, ntype
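# Sketch of how the factory above is typically driven (an assumption -- the
# registration loop itself lies outside this excerpt): the add-on parses each
# RenderMan .args file found via args_files_in_path() and stores the generated
# classes in the 'nodetypes' dict that the Add_Node operators below consult:
#
#   for name, args_xml in ...:  # one parsed ElementTree root per .args file
#       vals = generate_node_type(prefs, name, args_xml)
#       if vals:
#           typename, ntype = vals
#           nodetypes[typename] = ntype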
# UI
def find_node_input(node, name):
for input in node.inputs:
if input.name == name:
return input
return None
def find_node(material, nodetype):
if material and material.node_tree:
ntree = material.node_tree
active_output_node = None
for node in ntree.nodes:
if getattr(node, "bl_idname", None) == nodetype:
if getattr(node, "is_active_output", True):
return node
if not active_output_node:
active_output_node = node
return active_output_node
return None
def panel_node_draw(layout, context, id_data, output_type, input_name):
ntree = id_data.node_tree
node = find_node(id_data, output_type)
if not node:
layout.label(text="No output node")
else:
input = find_node_input(node, input_name)
#layout.template_node_view(ntree, node, input)
draw_nodes_properties_ui(layout, context, ntree)
return True
def is_renderman_nodetree(material):
return find_node(material, 'RendermanOutputNode')
def draw_nodes_properties_ui(layout, context, nt, input_name='Bxdf',
output_node_type="output"):
output_node = next((n for n in nt.nodes
if hasattr(n, 'renderman_node_type') and n.renderman_node_type == output_node_type), None)
if output_node is None:
return
socket = output_node.inputs[input_name]
node = socket_node_input(nt, socket)
layout.context_pointer_set("nodetree", nt)
layout.context_pointer_set("node", output_node)
layout.context_pointer_set("socket", socket)
split = layout.split(0.35)
split.label(socket.name + ':')
if socket.is_linked:
# for lights draw the shading rate ui.
split.operator_menu_enum("node.add_%s" % input_name.lower(),
"node_type", text=node.bl_label)
else:
split.operator_menu_enum("node.add_%s" % input_name.lower(),
"node_type", text='None')
if node is not None:
draw_node_properties_recursive(layout, context, nt, node)
def socket_node_input(nt, socket):
return next((l.from_node for l in nt.links if l.to_socket == socket), None)
def socket_socket_input(nt, socket):
return next((l.from_socket for l in nt.links if l.to_socket == socket and socket.is_linked),
None)
def linked_sockets(sockets):
if sockets is None:
return []
return [i for i in sockets if i.is_linked]
def draw_node_properties_recursive(layout, context, nt, node, level=0):
def indented_label(layout, label, level):
for i in range(level):
layout.label('', icon='BLANK1')
if label:
layout.label(label)
layout.context_pointer_set("node", node)
layout.context_pointer_set("nodetree", nt)
def draw_props(prop_names, layout, level):
for prop_name in prop_names:
# skip showing the shape for PxrStdAreaLight
if prop_name in ["lightGroup", "rman__Shape", "coneAngle", "penumbraAngle"]:
continue
if prop_name == "codetypeswitch":
row = layout.row()
if node.codetypeswitch == 'INT':
row.prop_search(node, "internalSearch",
bpy.data, "texts", text="")
elif node.codetypeswitch == 'EXT':
row.prop(node, "shadercode")
elif prop_name == "internalSearch" or prop_name == "shadercode" or prop_name == "expression":
pass
else:
prop_meta = node.prop_meta[prop_name]
prop = getattr(node, prop_name)
if 'widget' in prop_meta and prop_meta['widget'] == 'null' or \
'hidden' in prop_meta and prop_meta['hidden']:
continue
# else check if the socket with this name is connected
socket = node.inputs[prop_name] if prop_name in node.inputs \
else None
layout.context_pointer_set("socket", socket)
if socket and socket.is_linked:
input_node = socket_node_input(nt, socket)
icon = 'DISCLOSURE_TRI_DOWN' if socket.ui_open \
else 'DISCLOSURE_TRI_RIGHT'
split = layout.split(NODE_LAYOUT_SPLIT)
row = split.row()
indented_label(row, None, level)
row.prop(socket, "ui_open", icon=icon, text='',
icon_only=True, emboss=False)
label = prop_meta.get('label', prop_name)
row.label(label + ':')
if ('type' in prop_meta and prop_meta['type'] == 'vstruct') or prop_name == 'inputMaterial':
split.operator_menu_enum("node.add_layer", "node_type",
text=input_node.bl_label, icon="LAYER_USED")
elif prop_meta['renderman_type'] == 'struct':
split.operator_menu_enum("node.add_manifold", "node_type",
text=input_node.bl_label, icon="LAYER_USED")
elif prop_meta['renderman_type'] == 'normal':
split.operator_menu_enum("node.add_bump", "node_type",
text=input_node.bl_label, icon="LAYER_USED")
else:
split.operator_menu_enum("node.add_pattern", "node_type",
text=input_node.bl_label, icon="LAYER_USED")
if socket.ui_open:
draw_node_properties_recursive(layout, context, nt,
input_node, level=level + 1)
else:
row = layout.row(align=True)
if prop_meta['renderman_type'] == 'page':
ui_prop = prop_name + "_uio"
ui_open = getattr(node, ui_prop)
icon = 'DISCLOSURE_TRI_DOWN' if ui_open \
else 'DISCLOSURE_TRI_RIGHT'
split = layout.split(NODE_LAYOUT_SPLIT)
row = split.row()
for i in range(level):
row.label('', icon='BLANK1')
row.prop(node, ui_prop, icon=icon, text='',
icon_only=True, emboss=False)
sub_prop_names = list(prop)
if node.bl_idname in {"PxrSurfaceBxdfNode", "PxrLayerPatternNode"}:
for pn in sub_prop_names:
if pn.startswith('enable'):
row.prop(node, pn, text='')
sub_prop_names.remove(pn)
break
row.label(prop_name.split('.')[-1] + ':')
if ui_open:
draw_props(sub_prop_names, layout, level + 1)
else:
indented_label(row, None, level)
# indented_label(row, socket.name+':')
# don't draw prop for struct type
if "Subset" in prop_name and prop_meta['type'] == 'string':
row.prop_search(node, prop_name, bpy.data.scenes[0].renderman,
"object_groups")
else:
if prop_meta['renderman_type'] != 'struct':
row.prop(node, prop_name, slider=True)
else:
row.label(prop_meta['label'])
if prop_name in node.inputs:
if ('type' in prop_meta and prop_meta['type'] == 'vstruct') or prop_name == 'inputMaterial':
row.operator_menu_enum("node.add_layer", "node_type",
text='', icon="LAYER_USED")
elif prop_meta['renderman_type'] == 'struct':
row.operator_menu_enum("node.add_manifold", "node_type",
text='', icon="LAYER_USED")
elif prop_meta['renderman_type'] == 'normal':
row.operator_menu_enum("node.add_bump", "node_type",
text='', icon="LAYER_USED")
else:
row.operator_menu_enum("node.add_pattern", "node_type",
text='', icon="LAYER_USED")
# if this is a cycles node do something different
if not hasattr(node, '__annotations__') or not "plugin_name" in node.__annotations__ or node.bl_idname == 'PxrOSLPatternNode':
node.draw_buttons(context, layout)
for input in node.inputs:
if input.is_linked:
input_node = socket_node_input(nt, input)
icon = 'DISCLOSURE_TRI_DOWN' if input.show_expanded \
else 'DISCLOSURE_TRI_RIGHT'
split = layout.split(NODE_LAYOUT_SPLIT)
row = split.row()
indented_label(row, None, level)
row.prop(input, "show_expanded", icon=icon, text='',
icon_only=True, emboss=False)
row.label(input.name + ':')
split.operator_menu_enum("node.add_pattern", "node_type",
text=input_node.bl_label, icon="LAYER_USED")
if input.show_expanded:
draw_node_properties_recursive(layout, context, nt,
input_node, level=level + 1)
else:
row = layout.row(align=True)
indented_label(row, None, level)
# indented_label(row, socket.name+':')
# don't draw prop for struct type
if input.hide_value:
row.label(input.name)
else:
row.prop(input, 'default_value',
slider=True, text=input.name)
row.operator_menu_enum("node.add_pattern", "node_type",
text='', icon="LAYER_USED")
else:
if node.plugin_name == 'PxrRamp':
dummy_nt = bpy.data.node_groups[node.node_group]
if dummy_nt:
layout.template_color_ramp(
dummy_nt.nodes['ColorRamp'], 'color_ramp')
draw_props(node.prop_names, layout, level)
layout.separator()
# Operators
# connect the pattern nodes in some sensible manner (color output to color input etc)
# TODO more robust
def link_node(nt, from_node, in_socket):
out_socket = None
# first look for resultF/resultRGB
if type(in_socket).__name__ in ['RendermanNodeSocketColor',
'RendermanNodeSocketVector']:
out_socket = from_node.outputs.get('resultRGB',
next((s for s in from_node.outputs
if type(s).__name__ == 'RendermanNodeSocketColor'), None))
elif type(in_socket).__name__ == 'RendermanNodeSocketStruct':
out_socket = from_node.outputs.get('pxrMaterialOut', None)
if not out_socket:
out_socket = from_node.outputs.get('result', None)
else:
out_socket = from_node.outputs.get('resultF',
next((s for s in from_node.outputs
if type(s).__name__ == 'RendermanNodeSocketFloat'), None))
if out_socket:
nt.links.new(out_socket, in_socket)
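# Illustrative example (not part of the original source): calling
# link_node(nt, tex_node, surface_node.inputs['diffuseColor']) for a PxrTexture
# upstream node picks its 'resultRGB' output because the target is a color
# socket, while a float target such as 'diffuseGain' would be fed from
# 'resultF'; struct targets look for 'pxrMaterialOut' first.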
# Base class for an operator that adds a node
class Add_Node:
'''
For generating cycles-style ui menus to add new nodes,
connected to a given input socket.
'''
def get_type_items(self, context):
items = []
# if this is a pattern input do columns!
if self.input_type.lower() == 'pattern':
i = 0
for pattern_cat, patterns in pattern_categories.items():
if pattern_cat.lower() in ['layer', 'script', 'manifold', 'bump', 'displace']:
continue
items.append(('', pattern_cat, pattern_cat, '', 0))
for nodename in sorted(patterns):
nodetype = patterns[nodename]
items.append((nodetype.typename, nodetype.bl_label,
nodetype.bl_label, '', i))
i += 1
items.append(('', '', '', '', 0))
items.append(('REMOVE', 'Remove',
'Remove the node connected to this socket', '', i + 1))
items.append(('DISCONNECT', 'Disconnect',
'Disconnect the node connected to this socket', '', i + 2))
elif self.input_type.lower() in ['layer', 'manifold', 'bump']:
patterns = pattern_categories[self.input_type]
for nodename in sorted(patterns):
nodetype = patterns[nodename]
items.append((nodetype.typename, nodetype.bl_label,
nodetype.bl_label))
items.append(('REMOVE', 'Remove',
'Remove the node connected to this socket'))
items.append(('DISCONNECT', 'Disconnect',
'Disconnect the node connected to this socket'))
else:
for nodetype in nodetypes.values():
if self.input_type.lower() == 'light' and nodetype.renderman_node_type == 'light':
if nodetype.__name__ == 'PxrMeshLightLightNode':
items.append((nodetype.typename, nodetype.bl_label,
nodetype.bl_label))
elif nodetype.renderman_node_type == self.input_type.lower():
items.append((nodetype.typename, nodetype.bl_label,
nodetype.bl_label))
items = sorted(items, key=itemgetter(1))
items.append(('REMOVE', 'Remove',
'Remove the node connected to this socket'))
items.append(('DISCONNECT', 'Disconnect',
'Disconnect the node connected to this socket'))
return items
node_type: EnumProperty(name="Node Type",
description='Node type to add to this socket',
items=get_type_items)
def execute(self, context):
new_type = self.properties.node_type
if new_type == 'DEFAULT':
return {'CANCELLED'}
nt = context.nodetree
node = context.node
socket = context.socket
input_node = socket_node_input(nt, socket)
if new_type == 'REMOVE':
nt.nodes.remove(input_node)
return {'FINISHED'}
if new_type == 'DISCONNECT':
link = next((l for l in nt.links if l.to_socket == socket), None)
nt.links.remove(link)
return {'FINISHED'}
# add a new node to existing socket
if input_node is None:
newnode = nt.nodes.new(new_type)
newnode.location = node.location
newnode.location[0] -= 300
newnode.selected = False
if self.input_type in ['Pattern', 'Layer', 'Manifold', 'Bump']:
link_node(nt, newnode, socket)
else:
nt.links.new(newnode.outputs[self.input_type], socket)
# replace input node with a new one
else:
newnode = nt.nodes.new(new_type)
input = socket
old_node = input.links[0].from_node
if self.input_type == 'Pattern':
link_node(nt, newnode, socket)
else:
nt.links.new(newnode.outputs[self.input_type], socket)
newnode.location = old_node.location
active_material = context.active_object.active_material
newnode.update_mat(active_material)
nt.nodes.remove(old_node)
return {'FINISHED'}
class NODE_OT_add_bxdf(bpy.types.Operator, Add_Node):
'''
For generating cycles-style ui menus to add new bxdfs,
connected to a given input socket.
'''
bl_idname = 'node.add_bxdf'
bl_label = 'Add Bxdf Node'
bl_description = 'Connect a Bxdf to this socket'
input_type: StringProperty(default='Bxdf')
class NODE_OT_add_displacement(bpy.types.Operator, Add_Node):
'''
For generating cycles-style ui menus to add new nodes,
connected to a given input socket.
'''
bl_idname = 'node.add_displacement'
bl_label = 'Add Displacement Node'
bl_description = 'Connect a Displacement shader to this socket'
input_type: StringProperty(default='Displacement')
class NODE_OT_add_light(bpy.types.Operator, Add_Node):
'''
For generating cycles-style ui menus to add new nodes,
connected to a given input socket.
'''
bl_idname = 'node.add_light'
bl_label = 'Add Light Node'
bl_description = 'Connect a Light shader to this socket'
input_type: StringProperty(default='Light')
class NODE_OT_add_pattern(bpy.types.Operator, Add_Node):
'''
For generating cycles-style ui menus to add new nodes,
connected to a given input socket.
'''
bl_idname = 'node.add_pattern'
bl_label = 'Add Pattern Node'
bl_description = 'Connect a Pattern to this socket'
input_type: StringProperty(default='Pattern')
class NODE_OT_add_layer(bpy.types.Operator, Add_Node):
'''
For generating cycles-style ui menus to add new nodes,
connected to a given input socket.
'''
bl_idname = 'node.add_layer'
bl_label = 'Add Layer Node'
bl_description = 'Connect a PxrLayer'
input_type: StringProperty(default='Layer')
class NODE_OT_add_manifold(bpy.types.Operator, Add_Node):
'''
For generating cycles-style ui menus to add new nodes,
connected to a given input socket.
'''
bl_idname = 'node.add_manifold'
bl_label = 'Add Manifold Node'
bl_description = 'Connect a Manifold'
input_type: StringProperty(default='Manifold')
class NODE_OT_add_bump(bpy.types.Operator, Add_Node):
'''
For generating cycles-style ui menus to add new nodes,
connected to a given input socket.
'''
bl_idname = 'node.add_bump'
bl_label = 'Add Bump Node'
bl_description = 'Connect a bump node'
input_type: StringProperty(default='Bump')
# return True if this param has a vstruct connection or is linked independently
def is_vstruct_or_linked(node, param):
meta = node.prop_meta[param]
if 'vstructmember' not in meta.keys():
return node.inputs[param].is_linked
elif param in node.inputs and node.inputs[param].is_linked:
return True
else:
vstruct_name, vstruct_member = meta['vstructmember'].split('.')
if node.inputs[vstruct_name].is_linked:
from_socket = node.inputs[vstruct_name].links[0].from_socket
vstruct_from_param = "%s_%s" % (
from_socket.identifier, vstruct_member)
return vstruct_conditional(from_socket.node, vstruct_from_param)
else:
return False
# tells if this param has a vstruct connection that is linked and its
# conditional is met
def is_vstruct_and_linked(node, param):
meta = node.prop_meta[param]
if 'vstructmember' not in meta.keys():
return False
else:
vstruct_name, vstruct_member = meta['vstructmember'].split('.')
if node.inputs[vstruct_name].is_linked:
from_socket = node.inputs[vstruct_name].links[0].from_socket
# if coming from a shader group hookup across that
if from_socket.node.bl_idname == 'ShaderNodeGroup':
ng = from_socket.node.node_tree
group_output = next((n for n in ng.nodes if n.bl_idname == 'NodeGroupOutput'),
None)
if group_output is None:
return False
in_sock = group_output.inputs[from_socket.name]
if len(in_sock.links):
from_socket = in_sock.links[0].from_socket
vstruct_from_param = "%s_%s" % (
from_socket.identifier, vstruct_member)
return vstruct_conditional(from_socket.node, vstruct_from_param)
else:
return False
# gets the value for a node walking up the vstruct chain
def get_val_vstruct(node, param):
if param in node.inputs and node.inputs[param].is_linked:
from_socket = node.inputs[param].links[0].from_socket
return get_val_vstruct(from_socket.node, from_socket.identifier)
elif is_vstruct_and_linked(node, param):
return True
else:
return getattr(node, param)
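# Worked example (illustrative): if 'bumpNormal' on a node is wired to another
# pattern, get_val_vstruct() follows the link and re-evaluates the question on
# the upstream node's output socket; if the parameter is only driven through a
# vstruct whose conditional passes it reports True, and otherwise it falls back
# to the plain property value stored on the node.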
# parse a vstruct conditional string and return True or False depending on
# whether the link should be made
def vstruct_conditional(node, param):
if not hasattr(node, 'shader_meta') and not hasattr(node, 'output_meta'):
return False
meta = getattr(
node, 'shader_meta') if node.bl_idname == "PxrOSLPatternNode" else node.output_meta
if param not in meta:
return False
meta = meta[param]
if 'vstructConditionalExpr' not in meta.keys():
return True
expr = meta['vstructConditionalExpr']
expr = expr.replace('connect if ', '')
set_zero = False
if ' else set 0' in expr:
expr = expr.replace(' else set 0', '')
set_zero = True
tokens = expr.split()
new_tokens = []
i = 0
num_tokens = len(tokens)
while i < num_tokens:
token = tokens[i]
prepend, append = '', ''
while token[0] == '(':
token = token[1:]
prepend += '('
while token[-1] == ')':
token = token[:-1]
append += ')'
if token == 'set':
i += 1
continue
# is connected change this to node.inputs.is_linked
if i < num_tokens - 2 and tokens[i + 1] == 'is'\
and 'connected' in tokens[i + 2]:
token = "is_vstruct_or_linked(node, '%s')" % token
last_token = tokens[i + 2]
while last_token[-1] == ')':
last_token = last_token[:-1]
append += ')'
i += 3
else:
i += 1
if hasattr(node, token):
token = "get_val_vstruct(node, '%s')" % token
new_tokens.append(prepend + token + append)
if 'if' in new_tokens and 'else' not in new_tokens:
new_tokens.extend(['else', 'False'])
return eval(" ".join(new_tokens))
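# Worked example (illustrative, with a made-up parameter name): given the .args
# expression "connect if (resultAOV is connected)", the token rewrite above
# produces "(is_vstruct_or_linked(node, 'resultAOV'))", which is then eval'd so
# the vstruct member only links when its source socket really is connected (an
# optional "else set 0" suffix is stripped beforehand).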
# Rib export
gains_to_enable = {
'diffuseGain': 'enableDiffuse',
'specularFaceColor': 'enablePrimarySpecular',
'specularEdgeColor': 'enablePrimarySpecular',
'roughSpecularFaceColor': 'enableRoughSpecular',
'roughSpecularEdgeColor': 'enableRoughSpecular',
'clearcoatFaceColor': 'enableClearCoat',
'clearcoatEdgeColor': 'enableClearCoat',
'iridescenceFaceGain': 'enableIridescence',
'iridescenceEdgeGain': 'enableIridescence',
'fuzzGain': 'enableFuzz',
'subsurfaceGain': 'enableSubsurface',
'singlescatterGain': 'enableSingleScatter',
'singlescatterDirectGain': 'enableSingleScatter',
'refractionGain': 'enableGlass',
'reflectionGain': 'enableGlass',
'glowGain': 'enableGlow',
}
# generate param list
def gen_params(ri, node, mat_name=None):
params = {}
    # If node is an OSL node, get properties from their dynamic location.
if node.bl_idname == "PxrOSLPatternNode":
if getattr(node, "codetypeswitch") == "EXT":
prefs = bpy.context.preferences.addons[__package__].preferences
osl_path = user_path(getattr(node, 'shadercode'))
FileName = os.path.basename(osl_path)
            FileNameNoEXT, ext = os.path.splitext(FileName)
out_file = os.path.join(
user_path(prefs.env_vars.out), "shaders", FileName)
if ext == ".oso":
if not os.path.exists(out_file) or not os.path.samefile(osl_path, out_file):
if not os.path.exists(os.path.join(user_path(prefs.env_vars.out), "shaders")):
os.mkdir(os.path.join(user_path(prefs.env_vars.out), "shaders"))
shutil.copy(osl_path, out_file)
for input_name, input in node.inputs.items():
prop_type = input.renderman_type
if input.is_linked:
to_socket = input
from_socket = input.links[0].from_socket
params['reference %s %s' % (prop_type, input_name)] = \
[get_output_param_str(
from_socket.node, mat_name, from_socket, to_socket)]
elif type(input) != RendermanNodeSocketStruct:
params['%s %s' % (prop_type, input_name)] = \
rib(input.default_value,
type_hint=prop_type)
    # Special case for SeExpr nodes. Assume the expression code lives in a text
    # datablock or file, so it needs to be extracted.
elif node.bl_idname == "PxrSeExprPatternNode":
fileInputType = node.codetypeswitch
for prop_name, meta in node.prop_meta.items():
if prop_name in ["codetypeswitch", 'filename']:
pass
elif prop_name == "internalSearch" and fileInputType == 'INT':
if node.internalSearch != "":
script = bpy.data.texts[node.internalSearch]
params['%s %s' % ("string",
"expression")] = \
rib(script.as_string(),
type_hint=meta['renderman_type'])
elif prop_name == "shadercode" and fileInputType == "NODE":
params['%s %s' % ("string", "expression")] = node.expression
else:
prop = getattr(node, prop_name)
# if input socket is linked reference that
if prop_name in node.inputs and \
node.inputs[prop_name].is_linked:
to_socket = node.inputs[prop_name]
from_socket = to_socket.links[0].from_socket
params['reference %s %s' % (meta['renderman_type'],
meta['renderman_name'])] = \
[get_output_param_str(
from_socket.node, mat_name, from_socket, to_socket)]
# else output rib
else:
params['%s %s' % (meta['renderman_type'],
meta['renderman_name'])] = \
rib(prop, type_hint=meta['renderman_type'])
else:
for prop_name, meta in node.prop_meta.items():
if prop_name in txmake_options.index:
pass
elif node.__annotations__["plugin_name"] == 'PxrRamp' and prop_name in ['colors', 'positions']:
pass
elif(prop_name in ['sblur', 'tblur', 'notes']):
pass
else:
prop = getattr(node, prop_name)
                # skip property group (page) containers
if meta['renderman_type'] == 'page':
continue
elif prop_name == 'inputMaterial' or \
('type' in meta and meta['type'] == 'vstruct'):
continue
# if input socket is linked reference that
elif hasattr(node, 'inputs') and prop_name in node.inputs and \
node.inputs[prop_name].is_linked:
to_socket = node.inputs[prop_name]
from_socket = to_socket.links[0].from_socket
from_node = to_socket.links[0].from_node
if 'arraySize' in meta:
params['reference %s[1] %s' % (meta['renderman_type'],
meta['renderman_name'])] \
= [get_output_param_str(
from_node, mat_name, from_socket, to_socket)]
else:
params['reference %s %s' % (meta['renderman_type'],
meta['renderman_name'])] = \
[get_output_param_str(
from_node, mat_name, from_socket, to_socket)]
# see if vstruct linked
elif is_vstruct_and_linked(node, prop_name):
vstruct_name, vstruct_member = meta[
'vstructmember'].split('.')
from_socket = node.inputs[
vstruct_name].links[0].from_socket
temp_mat_name = mat_name
if from_socket.node.bl_idname == 'ShaderNodeGroup':
ng = from_socket.node.node_tree
group_output = next((n for n in ng.nodes if n.bl_idname == 'NodeGroupOutput'),
None)
if group_output is None:
return False
in_sock = group_output.inputs[from_socket.name]
if len(in_sock.links):
from_socket = in_sock.links[0].from_socket
temp_mat_name = mat_name + '.' + from_socket.node.name
vstruct_from_param = "%s_%s" % (
from_socket.identifier, vstruct_member)
if vstruct_from_param in from_socket.node.output_meta:
actual_socket = from_socket.node.output_meta[
vstruct_from_param]
params['reference %s %s' % (meta['renderman_type'],
meta['renderman_name'])] = \
[get_output_param_str(
from_socket.node, temp_mat_name, actual_socket)]
else:
print('Warning! %s not found on %s' %
(vstruct_from_param, from_socket.node.name))
# else output rib
else:
                    # skip unlinked struct and enum params
if meta['renderman_type'] in ['struct', 'enum']:
continue
# if this is a gain on PxrSurface and the lobe isn't
# enabled
if node.bl_idname == 'PxrSurfaceBxdfNode' and \
prop_name in gains_to_enable and \
not getattr(node, gains_to_enable[prop_name]):
val = [0, 0, 0] if meta[
'renderman_type'] == 'color' else 0
params['%s %s' % (meta['renderman_type'],
meta['renderman_name'])] = val
elif 'options' in meta and meta['options'] == 'texture' \
and node.bl_idname != "PxrPtexturePatternNode" or \
('widget' in meta and meta['widget'] == 'assetIdInput' and prop_name != 'iesProfile'):
params['%s %s' % (meta['renderman_type'],
meta['renderman_name'])] = \
rib(get_tex_file_name(prop),
type_hint=meta['renderman_type'])
elif 'arraySize' in meta:
if type(prop) == int:
prop = [prop]
params['%s[%d] %s' % (meta['renderman_type'], len(prop),
meta['renderman_name'])] \
= rib(prop)
else:
params['%s %s' % (meta['renderman_type'],
meta['renderman_name'])] = \
rib(prop, type_hint=meta['renderman_type'])
if node.__annotations__["plugin_name"] == 'PxrRamp':
nt = bpy.data.node_groups[node.node_group]
if nt:
dummy_ramp = nt.nodes['ColorRamp']
colors = []
positions = []
# double the start and end points
positions.append(float(dummy_ramp.color_ramp.elements[0].position))
colors.extend(dummy_ramp.color_ramp.elements[0].color[:3])
for e in dummy_ramp.color_ramp.elements:
positions.append(float(e.position))
colors.extend(e.color[:3])
positions.append(
float(dummy_ramp.color_ramp.elements[-1].position))
colors.extend(dummy_ramp.color_ramp.elements[-1].color[:3])
params['color[%d] colors' % len(positions)] = colors
params['float[%d] positions' % len(positions)] = positions
return params
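# Illustrative shape of the dict gen_params() returns (names and values below are
# hypothetical, not from a real scene):
#     {'color diffuseColor': [0.18, 0.18, 0.18],
#      'reference float presence': ['Material.PxrTexture:resultR']}
# Unlinked params are emitted as plain "<type> <name>" entries via rib(); linked
# sockets become "reference <type> <name>" entries whose value is a
# "<node handle>:<output socket>" string built by get_output_param_str().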
def create_rman_surface(nt, parent_node, input_index, node_type="PxrSurfaceBxdfNode"):
layer = nt.nodes.new(node_type)
nt.links.new(layer.outputs[0], parent_node.inputs[input_index])
setattr(layer, 'enableDiffuse', False)
layer.location = parent_node.location
layer.diffuseGain = 0
layer.location[0] -= 300
return layer
combine_nodes = ['ShaderNodeAddShader', 'ShaderNodeMixShader']
# rman_parent could be PxrSurface or PxrMixer
def convert_cycles_bsdf(nt, rman_parent, node, input_index):
# if mix or add pass both to parent
if node.bl_idname in combine_nodes:
i = 0 if node.bl_idname == 'ShaderNodeAddShader' else 1
node1 = node.inputs[
0 + i].links[0].from_node if node.inputs[0 + i].is_linked else None
node2 = node.inputs[
1 + i].links[0].from_node if node.inputs[1 + i].is_linked else None
if not node1 and not node2:
return
elif not node1:
convert_cycles_bsdf(nt, rman_parent, node2, input_index)
elif not node2:
convert_cycles_bsdf(nt, rman_parent, node1, input_index)
        # if one's a combiner or they're of the same type and not glossy we need
        # to make a mixer
elif node.bl_idname == 'ShaderNodeMixShader' or node1.bl_idname in combine_nodes \
or node2.bl_idname in combine_nodes or \
node1.bl_idname == 'ShaderNodeGroup' or node2.bl_idname == 'ShaderNodeGroup' \
or (bsdf_map[node1.bl_idname][0] == bsdf_map[node2.bl_idname][0]):
mixer = nt.nodes.new('PxrLayerMixerPatternNode')
# if parent is output make a pxr surface first
nt.links.new(mixer.outputs["pxrMaterialOut"],
rman_parent.inputs[input_index])
offset_node_location(rman_parent, mixer, node)
# set the layer masks
if node.bl_idname == 'ShaderNodeAddShader':
mixer.layer1Mask = .5
else:
convert_cycles_input(
nt, node.inputs['Fac'], mixer, 'layer1Mask')
# make a new node for each
convert_cycles_bsdf(nt, mixer, node1, 0)
convert_cycles_bsdf(nt, mixer, node2, 1)
        # this is a heterogeneous mix or add
else:
if rman_parent.__annotations__["plugin_name"] == 'PxrLayerMixer':
old_parent = rman_parent
rman_parent = create_rman_surface(nt, rman_parent, input_index,
'PxrLayerPatternNode')
offset_node_location(old_parent, rman_parent, node)
convert_cycles_bsdf(nt, rman_parent, node1, 0)
convert_cycles_bsdf(nt, rman_parent, node2, 1)
# else set lobe on parent
elif 'Bsdf' in node.bl_idname or node.bl_idname == 'ShaderNodeSubsurfaceScattering':
if rman_parent.__annotations__["plugin_name"] == 'PxrLayerMixer':
old_parent = rman_parent
rman_parent = create_rman_surface(nt, rman_parent, input_index,
'PxrLayerPatternNode')
offset_node_location(old_parent, rman_parent, node)
node_type = node.bl_idname
bsdf_map[node_type][1](nt, node, rman_parent)
# if we find an emission node, naively make it a meshlight
# note this will only make the last emission node the light
elif node.bl_idname == 'ShaderNodeEmission':
output = next((n for n in nt.nodes if hasattr(n, 'renderman_node_type') and
n.renderman_node_type == 'output'),
None)
meshlight = nt.nodes.new("PxrMeshLightLightNode")
nt.links.new(meshlight.outputs[0], output.inputs["Light"])
meshlight.location = output.location
meshlight.location[0] -= 300
convert_cycles_input(
nt, node.inputs['Strength'], meshlight, "intensity")
if node.inputs['Color'].is_linked:
convert_cycles_input(
nt, node.inputs['Color'], meshlight, "textureColor")
else:
setattr(meshlight, 'lightColor', node.inputs[
'Color'].default_value[:3])
else:
rman_node = convert_cycles_node(nt, node)
nt.links.new(rman_node.outputs[0], rman_parent.inputs[input_index])
def convert_cycles_displacement(nt, surface_node, displace_socket):
# for now just do bump
if displace_socket.is_linked:
bump = nt.nodes.new("PxrBumpPatternNode")
nt.links.new(bump.outputs[0], surface_node.inputs['bumpNormal'])
bump.location = surface_node.location
bump.location[0] -= 200
bump.location[1] -= 100
convert_cycles_input(nt, displace_socket, bump, "inputBump")
# return
# if displace_socket.is_linked:
# displace = nt.nodes.new("PxrDisplaceDisplacementNode")
# nt.links.new(displace.outputs[0], output_node.inputs['Displacement'])
# displace.location = output_node.location
# displace.location[0] -= 200
# displace.location[1] -= 100
# setattr(displace, 'dispAmount', .01)
# convert_cycles_input(nt, displace_socket, displace, "dispScalar")
# this could be made more robust by shifting the entire node tree below the
# bounds of the Cycles node tree
def set_ouput_node_location(nt, output_node, cycles_output):
output_node.location = cycles_output.location
output_node.location[1] -= 500
def offset_node_location(rman_parent, rman_node, cycles_node):
linked_socket = next((sock for sock in cycles_node.outputs if sock.is_linked),
None)
rman_node.location = rman_parent.location
if linked_socket:
rman_node.location += (cycles_node.location -
linked_socket.links[0].to_node.location)
def convert_cycles_nodetree(id, output_node, reporter):
# find base node
from . import cycles_convert
cycles_convert.converted_nodes = {}
nt = id.node_tree
reporter({'INFO'}, 'Converting material ' + id.name + ' to RenderMan')
cycles_output_node = find_node(id, 'ShaderNodeOutputMaterial')
if not cycles_output_node:
reporter({'WARNING'}, 'No Cycles output found ' + id.name)
return False
# if no bsdf return false
if not cycles_output_node.inputs[0].is_linked:
reporter({'WARNING'}, 'No Cycles bsdf found ' + id.name)
return False
# set the output node location
set_ouput_node_location(nt, output_node, cycles_output_node)
# walk tree
cycles_convert.report = reporter
begin_cycles_node = cycles_output_node.inputs[0].links[0].from_node
    # if this is an emission node, use a PxrMeshLight
if begin_cycles_node.bl_idname == "ShaderNodeEmission":
meshlight = nt.nodes.new("PxrMeshLightLightNode")
nt.links.new(meshlight.outputs[0], output_node.inputs["Light"])
offset_node_location(output_node, meshlight, begin_cycles_node)
convert_cycles_input(nt, begin_cycles_node.inputs[
'Strength'], meshlight, "intensity")
if begin_cycles_node.inputs['Color'].is_linked:
convert_cycles_input(nt, begin_cycles_node.inputs[
'Color'], meshlight, "textureColor")
else:
setattr(meshlight, 'lightColor', begin_cycles_node.inputs[
'Color'].default_value[:3])
bxdf = nt.nodes.new('PxrBlackBxdfNode')
nt.links.new(bxdf.outputs[0], output_node.inputs["Bxdf"])
else:
base_surface = create_rman_surface(nt, output_node, 0)
offset_node_location(output_node, base_surface, begin_cycles_node)
convert_cycles_bsdf(nt, base_surface, begin_cycles_node, 0)
convert_cycles_displacement(
nt, base_surface, cycles_output_node.inputs[2])
return True
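# Hypothetical usage sketch (names are illustrative): an operator converting the
# active material would do something like
#     output = material.node_tree.nodes.new('RendermanOutputNode')
#     convert_cycles_nodetree(material, output, self.report)
# where `material` owns a Cycles node tree and `self.report` is the operator's
# reporter callback; on success the RenderMan graph is built alongside the
# existing Cycles nodes.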
cycles_node_map = {
'ShaderNodeAttribute': 'node_attribute',
'ShaderNodeBlackbody': 'node_checker_blackbody',
'ShaderNodeTexBrick': 'node_brick_texture',
'ShaderNodeBrightContrast': 'node_brightness',
'ShaderNodeTexChecker': 'node_checker_texture',
'ShaderNodeBump': 'node_bump',
'ShaderNodeCameraData': 'node_camera',
'ShaderNodeCombineHSV': 'node_combine_hsv',
'ShaderNodeCombineRGB': 'node_combine_rgb',
'ShaderNodeCombineXYZ': 'node_combine_xyz',
'ShaderNodeTexEnvironment': 'node_environment_texture',
'ShaderNodeFresnel': 'node_fresnel',
'ShaderNodeGamma': 'node_gamma',
'ShaderNodeNewGeometry': 'node_geometry',
'ShaderNodeTexGradient': 'node_gradient_texture',
'ShaderNodeHairInfo': 'node_hair_info',
'ShaderNodeInvert': 'node_invert',
'ShaderNodeHueSaturation': 'node_hsv',
'ShaderNodeTexImage': 'node_image_texture',
'ShaderNodeLayerWeight': 'node_layer_weight',
'ShaderNodeLightFalloff': 'node_light_falloff',
'ShaderNodeLightPath': 'node_light_path',
'ShaderNodeTexMagic': 'node_magic_texture',
'ShaderNodeMapping': 'node_mapping',
'ShaderNodeMath': 'node_math',
'ShaderNodeMixRGB': 'node_mix',
'ShaderNodeTexMusgrave': 'node_musgrave_texture',
'ShaderNodeTexNoise': 'node_noise_texture',
'ShaderNodeNormal': 'node_normal',
'ShaderNodeNormalMap': 'node_normal_map',
'ShaderNodeObjectInfo': 'node_object_info',
'ShaderNodeParticleInfo': 'node_particle_info',
'ShaderNodeRGBCurve': 'node_rgb_curves',
'ShaderNodeValToRGB': 'node_rgb_ramp',
'ShaderNodeSeparateHSV': 'node_separate_hsv',
'ShaderNodeSeparateRGB': 'node_separate_rgb',
'ShaderNodeSeparateXYZ': 'node_separate_xyz',
'ShaderNodeTexSky': 'node_sky_texture',
'ShaderNodeTangent': 'node_tangent',
'ShaderNodeTexCoord': 'node_texture_coordinate',
'ShaderNodeUVMap': 'node_uv_map',
'ShaderNodeValue': 'node_value',
'ShaderNodeVectorCurves': 'node_vector_curves',
'ShaderNodeVectorMath': 'node_vector_math',
'ShaderNodeVectorTransform': 'node_vector_transform',
'ShaderNodeTexVoronoi': 'node_voronoi_texture',
'ShaderNodeTexWave': 'node_wave_texture',
'ShaderNodeWavelength': 'node_wavelength',
'ShaderNodeWireframe': 'node_wireframe',
}
def get_mat_name(mat_name):
return mat_name.replace(' ', '')
def get_node_name(node, mat_name):
return "%s.%s" % (mat_name, node.name.replace(' ', ''))
def get_socket_name(node, socket):
if type(socket) == dict:
return socket['name'].replace(' ', '')
    # for a renderman node we can just use the socket identifier; for other nodes
    # disambiguate same-named input/output sockets with an In/Out suffix
    else:
        if not hasattr(node, '__annotations__') or 'plugin_name' not in node.__annotations__:
if socket.name in node.inputs and socket.name in node.outputs:
suffix = 'Out' if socket.is_output else 'In'
return socket.name.replace(' ', '') + suffix
return socket.identifier.replace(' ', '')
def get_socket_type(node, socket):
sock_type = socket.type.lower()
if sock_type == 'rgba':
return 'color'
elif sock_type == 'value':
return 'float'
elif sock_type == 'vector':
return 'point'
else:
return sock_type
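# e.g. a Cycles 'RGBA' socket exports as 'color', a 'VALUE' socket as 'float' and
# a 'VECTOR' socket as 'point'; any other socket type falls through lower-cased.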
# do we need to convert this socket?
def do_convert_socket(from_socket, to_socket):
if not to_socket:
return False
return (is_float_type(from_socket) and is_float3_type(to_socket)) or \
(is_float3_type(from_socket) and is_float_type(to_socket))
def build_output_param_str(mat_name, from_node, from_socket, convert_socket=False):
from_node_name = get_node_name(from_node, mat_name)
from_sock_name = get_socket_name(from_node, from_socket)
# replace with the convert node's output
if convert_socket:
if is_float_type(from_socket):
return "convert_%s.%s:resultRGB" % (from_node_name, from_sock_name)
else:
return "convert_%s.%s:resultF" % (from_node_name, from_sock_name)
else:
return "%s:%s" % (from_node_name, from_sock_name)
def get_output_param_str(node, mat_name, socket, to_socket=None):
    # if this is a node group, follow the link through the group's internal output node
if node.bl_idname == 'ShaderNodeGroup':
ng = node.node_tree
group_output = next((n for n in ng.nodes if n.bl_idname == 'NodeGroupOutput'),
None)
if group_output is None:
return "error:error"
in_sock = group_output.inputs[socket.name]
if len(in_sock.links):
link = in_sock.links[0]
return build_output_param_str(mat_name + '.' + node.name, link.from_node, link.from_socket, do_convert_socket(link.from_socket, to_socket))
else:
return "error:error"
if node.bl_idname == 'NodeGroupInput':
global current_group_node
if current_group_node is None:
return "error:error"
in_sock = current_group_node.inputs[socket.name]
if len(in_sock.links):
link = in_sock.links[0]
return build_output_param_str(mat_name, link.from_node, link.from_socket, do_convert_socket(link.from_socket, to_socket))
else:
return "error:error"
return build_output_param_str(mat_name, node, socket, do_convert_socket(socket, to_socket))
# hack!!!
current_group_node = None
def translate_node_group(ri, group_node, mat_name):
ng = group_node.node_tree
out = next((n for n in ng.nodes if n.bl_idname == 'NodeGroupOutput'),
None)
if out is None:
return
nodes_to_export = gather_nodes(out)
global current_group_node
current_group_node = group_node
for node in nodes_to_export:
shader_node_rib(ri, node, mat_name=(mat_name + '.' + group_node.name))
current_group_node = None
def translate_cycles_node(ri, node, mat_name):
if node.bl_idname == 'ShaderNodeGroup':
translate_node_group(ri, node, mat_name)
return
if node.bl_idname not in cycles_node_map.keys():
print('No translation for node of type %s named %s' %
(node.bl_idname, node.name))
return
mapping = cycles_node_map[node.bl_idname]
params = {}
for in_name, input in node.inputs.items():
param_name = "%s %s" % (get_socket_type(
node, input), get_socket_name(node, input))
if input.is_linked:
param_name = 'reference ' + param_name
link = input.links[0]
param_val = get_output_param_str(
link.from_node, mat_name, link.from_socket, input)
else:
param_val = rib(input.default_value,
type_hint=get_socket_type(node, input))
# skip if this is a vector set to 0 0 0
if input.type == 'VECTOR' and param_val == [0.0, 0.0, 0.0]:
continue
params[param_name] = param_val
ramp_size = 256
if node.bl_idname == 'ShaderNodeValToRGB':
colors = []
alphas = []
for i in range(ramp_size):
c = node.color_ramp.evaluate(float(i) / (ramp_size - 1.0))
colors.extend(c[:3])
alphas.append(c[3])
params['color[%d] ramp_color' % ramp_size] = colors
params['float[%d] ramp_alpha' % ramp_size] = alphas
elif node.bl_idname == 'ShaderNodeVectorCurve':
colors = []
node.mapping.initialize()
r = node.mapping.curves[0]
g = node.mapping.curves[1]
b = node.mapping.curves[2]
for i in range(ramp_size):
v = float(i) / (ramp_size - 1.0)
colors.extend([r.evaluate(v), g.evaluate(v), b.evaluate(v)])
params['color[%d] ramp' % ramp_size] = colors
elif node.bl_idname == 'ShaderNodeRGBCurve':
colors = []
node.mapping.initialize()
c = node.mapping.curves[0]
r = node.mapping.curves[1]
g = node.mapping.curves[2]
b = node.mapping.curves[3]
for i in range(ramp_size):
v = float(i) / (ramp_size - 1.0)
c_val = c.evaluate(v)
colors.extend([r.evaluate(v) * c_val, g.evaluate(v)
* c_val, b.evaluate(v) * c_val])
params['color[%d] ramp' % ramp_size] = colors
#print('doing %s %s' % (node.bl_idname, node.name))
# print(params)
ri.Pattern(mapping, get_node_name(node, mat_name), params)
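# Note: ramp and curve nodes have no procedural equivalent on the RenderMan side
# here, so they are baked above into fixed 256-sample arrays (ramp_size) that the
# mapped pattern (e.g. node_rgb_ramp) can look up at render time.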
# Export to rib
def shader_node_rib(ri, node, mat_name, disp_bound=0.0, portal=False):
    # this is a tuple telling us to convert
if type(node) == type(()):
shader, from_node, from_socket = node
input_type = 'float' if shader == 'PxrToFloat3' else 'color'
node_name = 'convert_%s.%s' % (get_node_name(
from_node, mat_name), get_socket_name(from_node, from_socket))
if from_node.bl_idname == 'ShaderNodeGroup':
node_name = 'convert_' + get_output_param_str(
from_node, mat_name, from_socket).replace(':', '.')
params = {"reference %s input" % input_type: get_output_param_str(
from_node, mat_name, from_socket)}
params['__instanceid'] = node_name
ri.Pattern(shader, node_name, params)
return
elif not hasattr(node, 'renderman_node_type'):
return translate_cycles_node(ri, node, mat_name)
params = gen_params(ri, node, mat_name)
instance = mat_name + '.' + node.name
params['__instanceid'] = instance
if 'string filename' in params:
params['string filename'] = bpy.path.abspath(params['string filename'])
if node.renderman_node_type == "pattern":
if node.bl_label == 'PxrOSL':
shader = node.__annotations__['plugin_name']
if shader:
ri.Pattern(shader, instance, params)
else:
ri.Pattern(node.bl_label, instance, params)
elif node.renderman_node_type == "light":
light_group_name = ''
scene = bpy.context.scene
for lg in scene.renderman.light_groups:
if mat_name in lg.members.keys():
light_group_name = lg.name
break
params['string lightGroup'] = light_group_name
params['__instanceid'] = mat_name
light_name = node.bl_label
if light_name == 'PxrPortalLight':
if mat_name in bpy.data.lamps:
lamp = bpy.context.scene.objects.active
if lamp and lamp.parent and lamp.parent.type == 'LAMP' \
and lamp.parent.data.renderman.renderman_type == 'ENV':
from .export import property_group_to_params
parent_node = lamp.parent.data.renderman.get_light_node()
parent_params = property_group_to_params(parent_node)
params['string domeSpace'] = lamp.parent.name
params['string portalName'] = mat_name
params['string domeColorMap'] = parent_params['string lightColorMap']
params['float intensity'] = parent_params['float intensity'] * params['float intensityMult']
del params['float intensityMult']
params['float exposure'] = parent_params['float exposure']
params['color lightColor'] = [i*j for i,j in zip(parent_params['color lightColor'],params['color tint'])]
del params['color tint']
if not params['int enableTemperature']:
params['int enableTemperature'] = parent_params['int enableTemperature']
params['float temperature'] = parent_params['float temperature']
params['float specular'] *= parent_params['float specular']
params['float diffuse'] *= parent_params['float diffuse']
ri.Light(light_name, mat_name, params)
elif node.renderman_node_type == "lightfilter":
params['__instanceid'] = mat_name
light_name = node.bl_label
ri.LightFilter(light_name, mat_name, params)
elif node.renderman_node_type == "displacement":
ri.Attribute('displacementbound', {'sphere': disp_bound})
ri.Displace(node.bl_label, mat_name, params)
else:
ri.Bxdf(node.bl_label, instance, params)
def replace_frame_num(prop):
frame_num = bpy.data.scenes[0].frame_current
prop = prop.replace('$f4', str(frame_num).zfill(4))
prop = prop.replace('$F4', str(frame_num).zfill(4))
prop = prop.replace('$f3', str(frame_num).zfill(3))
prop = prop.replace('$F3', str(frame_num).zfill(3))
return prop
# return the output file name if this texture is to be txmade.
def get_tex_file_name(prop):
prop = replace_frame_num(prop)
prop = bpy.path.basename(prop)
part = prop.rpartition('.')
prop = part[0]
if prop != '' and part[2].lower() != 'tex':
_p_ = bpy.context.scene.renderman.path_texture_output
        # avoid doubling the separator if the output path already ends with one
_s_ = "" if _p_.endswith("/") or _p_.endswith("\\") else "/"
_f_ = "{}{}{}{}".format(_p_, _s_, prop, ".tex")
return user_path(_f_)
else:
return prop
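# Illustrative sketch (paths are hypothetical): with path_texture_output set to
# "/tmp/textures" and the current frame at 1,
#     get_tex_file_name("//maps/brick.$F4.png") -> "/tmp/textures/brick.0001.tex"
# replace_frame_num() substitutes the frame for $f4/$F4/$f3/$F3 first; an input
# that is already a .tex file just returns its basename without the extension.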
def is_same_type(socket1, socket2):
return (type(socket1) == type(socket2)) or (is_float_type(socket1) and is_float_type(socket2)) or \
(is_float3_type(socket1) and is_float3_type(socket2))
def is_float_type(socket):
# this is a renderman node
if type(socket) == type({}):
return socket['renderman_type'] in ['int', 'float']
    elif hasattr(socket.node, '__annotations__') and 'plugin_name' in socket.node.__annotations__:
prop_meta = getattr(socket.node, 'output_meta', [
]) if socket.is_output else getattr(socket.node, 'prop_meta', [])
if socket.name in prop_meta:
return prop_meta[socket.name]['renderman_type'] in ['int', 'float']
else:
return socket.type in ['INT', 'VALUE']
def is_float3_type(socket):
# this is a renderman node
    if type(socket) == type({}):
        return socket['renderman_type'] in ['color', 'vector', 'normal']
    elif hasattr(socket.node, '__annotations__') and 'plugin_name' in socket.node.__annotations__:
prop_meta = getattr(socket.node, 'output_meta', [
]) if socket.is_output else getattr(socket.node, 'prop_meta', [])
if socket.name in prop_meta:
return prop_meta[socket.name]['renderman_type'] in ['color', 'vector', 'normal']
else:
return socket.type in ['RGBA', 'VECTOR']
# walk the tree for nodes to export
def gather_nodes(node):
nodes = []
for socket in node.inputs:
if socket.is_linked:
link = socket.links[0]
for sub_node in gather_nodes(socket.links[0].from_node):
if sub_node not in nodes:
nodes.append(sub_node)
            # if this is a float -> color, insert a PxrToFloat3
if is_float_type(link.from_socket) and is_float3_type(socket):
convert_node = ('PxrToFloat3', link.from_node,
link.from_socket)
if convert_node not in nodes:
nodes.append(convert_node)
elif is_float3_type(link.from_socket) and is_float_type(socket):
convert_node = ('PxrToFloat', link.from_node, link.from_socket)
if convert_node not in nodes:
nodes.append(convert_node)
if hasattr(node, 'renderman_node_type') and node.renderman_node_type != 'output':
nodes.append(node)
elif not hasattr(node, 'renderman_node_type') and node.bl_idname not in ['ShaderNodeOutputMaterial', 'NodeGroupInput', 'NodeGroupOutput']:
nodes.append(node)
return nodes
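# gather_nodes() collects upstream nodes depth-first; wherever a float output feeds
# a color/vector input (or the reverse) it also appends a
#     ('PxrToFloat3', from_node, from_socket)  /  ('PxrToFloat', from_node, from_socket)
# tuple, which shader_node_rib() later expands into the corresponding conversion
# pattern before the consuming node is emitted.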
# export all shader nodes for a material or light id
def export_shader_nodetree(ri, id, handle=None, disp_bound=0.0, iterate_instance=False):
if id and id.node_tree:
if is_renderman_nodetree(id):
portal = type(
id).__name__ == 'AreaLamp' and id.renderman.renderman_type == 'PORTAL'
# if id.renderman.nodetree not in bpy.data.node_groups:
# load_tree_from_lib(id)
nt = id.node_tree
if not handle:
handle = id.name
if type(id) == bpy.types.Material:
handle = get_mat_name(handle)
# if ipr we need to iterate instance num on nodes for edits
from . import engine
if engine.ipr and hasattr(id.renderman, 'instance_num'):
if iterate_instance:
id.renderman.instance_num += 1
if id.renderman.instance_num > 0:
handle += "_%d" % id.renderman.instance_num
out = next((n for n in nt.nodes if hasattr(n, 'renderman_node_type') and
n.renderman_node_type == 'output'),
None)
if out is None:
return
nodes_to_export = gather_nodes(out)
ri.ArchiveRecord('comment', "Shader Graph")
for node in nodes_to_export:
shader_node_rib(ri, node, mat_name=handle,
disp_bound=disp_bound, portal=portal)
elif find_node(id, 'ShaderNodeOutputMaterial'):
print("Error Material %s needs a RenderMan BXDF" % id.name)
def get_textures_for_node(node, matName=""):
textures = []
if hasattr(node, 'bl_idname'):
if node.bl_idname == "PxrPtexturePatternNode":
return textures
elif node.bl_idname == "PxrOSLPatternNode":
for input_name, input in node.inputs.items():
if hasattr(input, 'is_texture') and input.is_texture:
prop = input.default_value
out_file_name = get_tex_file_name(prop)
textures.append((replace_frame_num(prop), out_file_name,
['-smode', 'periodic', '-tmode',
'periodic']))
return textures
elif node.bl_idname == 'ShaderNodeGroup':
nt = node.node_tree
for node in nt.nodes:
textures.extend(get_textures_for_node(node, matName=""))
return textures
if hasattr(node, 'prop_meta'):
for prop_name, meta in node.prop_meta.items():
if prop_name in txmake_options.index:
pass
elif hasattr(node, prop_name):
prop = getattr(node, prop_name)
if meta['renderman_type'] == 'page':
continue
# else return a tuple of in name/outname
else:
if ('options' in meta and meta['options'] == 'texture') or \
(node.renderman_node_type == 'light' and
'widget' in meta and meta['widget'] == 'assetIdInput' and prop_name != 'iesProfile'):
out_file_name = get_tex_file_name(prop)
# if they don't match add this to the list
if out_file_name != prop:
if node.renderman_node_type == 'light' and \
"Dome" in node.bl_label:
# no options for now
textures.append(
(replace_frame_num(prop), out_file_name, ['-envlatl']))
else:
# Test and see if options like smode are on
# this node.
if hasattr(node, "smode"):
optionsList = []
for option in txmake_options.index:
partsOfOption = getattr(
txmake_options, option)
if partsOfOption["exportType"] == "name":
optionsList.append("-" + option)
# Float values need converting
# before they are passed to command
# line
if partsOfOption["type"] == "float":
optionsList.append(
str(getattr(node, option)))
else:
optionsList.append(
getattr(node, option))
else:
# Float values need converting
# before they are passed to command
# line
if partsOfOption["type"] == "float":
optionsList.append(
str(getattr(node, option)))
else:
optionsList.append(
"-" + getattr(node, option))
textures.append(
(replace_frame_num(prop), out_file_name, optionsList))
else:
# no options found add the bare minimum
# options for smooth export.
textures.append((replace_frame_num(prop), out_file_name,
['-smode', 'periodic',
'-tmode', 'periodic']))
return textures
def get_textures(id):
textures = []
if id is None or not id.node_tree:
return textures
nt = id.node_tree
for node in nt.nodes:
textures.extend(get_textures_for_node(node, id.name))
return textures
pattern_node_categories_map = {"texture": ["PxrFractal", "PxrBakeTexture", "PxrBakePointCloud", "PxrProjectionLayer", "PxrPtexture", "PxrTexture", "PxrVoronoise", "PxrWorley", "PxrFractalize", "PxrDirt", "PxrLayeredTexture", "PxrMultiTexture"],
"bump": ["PxrBump", "PxrNormalMap", "PxrFlakes", "aaOceanPrmanShader", 'PxrAdjustNormal'],
"color": ["PxrBlackBody", "PxrHairColor", "PxrBlend", "PxrLayeredBlend", "PxrClamp", "PxrExposure", "PxrGamma", "PxrHSL", "PxrInvert", "PxrMix", "PxrProjectionStack", "PxrRamp", "PxrRemap", "PxrThinFilm", "PxrThreshold", "PxrVary", "PxrChecker", "PxrColorCorrect"],
"manifold": ["PxrManifold2D", "PxrRandomTextureManifold", "PxrManifold3D", "PxrManifold3DN", "PxrProjector", "PxrRoundCube", "PxrBumpManifold2D", "PxrTileManifold"],
"geometry": ["PxrDot", "PxrCross", "PxrFacingRatio", "PxrTangentField"],
"script": ["PxrOSL", "PxrSeExpr"],
"utility": ["PxrAttribute", "PxrGeometricAOVs", "PxrMatteID", "PxrPrimvar", "PxrShadedSide", "PxrTee", "PxrToFloat", "PxrToFloat3", "PxrVariable"],
"displace": ["PxrDispScalarLayer", 'PxrDispTransform', 'PxrDispVectorLayer'],
"layer": ['PxrLayer', 'PxrLayerMixer']}
# Node Categorization List
def GetPatternCategory(name):
for cat_name, node_names in pattern_node_categories_map.items():
if name in node_names:
return cat_name
    return 'deprecated'
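# e.g. GetPatternCategory('PxrTexture') -> 'texture', GetPatternCategory('PxrBump')
# -> 'bump'; anything not listed in pattern_node_categories_map is reported as
# 'deprecated' and skipped when node categories are registered in register() below.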
# our own base class with an appropriate poll function,
# so the categories only show up in our own tree type
class RendermanPatternNodeCategory(NodeCategory):
@classmethod
def poll(cls, context):
return context.space_data.tree_type == 'ShaderNodeTree'
classes = [
RendermanShaderSocket,
RendermanNodeSocketColor,
RendermanNodeSocketFloat,
RendermanNodeSocketInt,
RendermanNodeSocketString,
RendermanNodeSocketVector,
RendermanNodeSocketStruct,
]
nodetypes = {}
pattern_categories = {}
def register():
for cls in classes:
bpy.utils.register_class(cls)
user_preferences = bpy.context.preferences
prefs = user_preferences.addons[__package__].preferences
categories = {}
for name, arg_file in args_files_in_path(prefs, None).items():
try:
vals = generate_node_type(prefs, name, ET.parse(arg_file).getroot())
if vals:
typename, nodetype = vals
nodetypes[typename] = nodetype
except Exception:
print("Error parsing " + name)
traceback.print_exc()
node_cats = {
'bxdf': ('RenderMan Bxdfs', []),
'light': ('RenderMan Lights', []),
'patterns_texture': ('RenderMan Texture Patterns', []),
'patterns_bump': ('RenderMan Bump Patterns', []),
'patterns_color': ('RenderMan Color Patterns', []),
'patterns_manifold': ('RenderMan Manifold Patterns', []),
'patterns_geometry': ('RenderMan Geometry Patterns', []),
'patterns_utility': ('RenderMan Utility Patterns', []),
'patterns_script': ('RenderMan Script Patterns', []),
'patterns_displace': ('RenderMan Displacement Patterns', []),
'patterns_layer': ('RenderMan Layers', []),
'displacement': ('RenderMan Displacements', [])
}
for name, node_type in nodetypes.items():
node_item = NodeItem(name, label=node_type.bl_label)
if node_type.renderman_node_type == 'pattern':
# insert pxr layer in bxdf
pattern_cat = GetPatternCategory(node_type.bl_label)
if pattern_cat == 'deprecated':
continue
node_cat = 'patterns_' + pattern_cat
node_cats[node_cat][1].append(node_item)
pattern_cat = pattern_cat.capitalize()
if pattern_cat not in pattern_categories:
pattern_categories[pattern_cat] = {}
pattern_categories[pattern_cat][name] = node_type
elif 'LM' in name and node_type.renderman_node_type == 'bxdf':
# skip LM materials
continue
elif node_type.renderman_node_type == 'light' and 'PxrMeshLight' not in name:
            # skip light nodes other than PxrMeshLight
continue
else:
node_cats[node_type.renderman_node_type][1].append(node_item)
# all categories in a list
node_categories = [
# identifier, label, items list
RendermanPatternNodeCategory("PRMan_output_nodes", "RenderMan Outputs",
items=[NodeItem('RendermanOutputNode', label=RendermanOutputNode.bl_label)]),
]
for name, (desc, items) in node_cats.items():
node_categories.append(RendermanPatternNodeCategory(name, desc,
items=sorted(items,
key=attrgetter('_label'))))
nodeitems_utils.register_node_categories("RENDERMANSHADERNODES",
node_categories)
def unregister():
nodeitems_utils.unregister_node_categories("RENDERMANSHADERNODES")
# bpy.utils.unregister_module(__name__)
for cls in classes:
bpy.utils.unregister_class(cls)
| 40.768443 | 296 | 0.583178 | from .shader_parameters import node_add_inputs
from .shader_parameters import node_add_outputs
from .shader_parameters import socket_map
from .shader_parameters import txmake_options, update_conditional_visops
from .util import args_files_in_path
from .util import get_path_list
from .util import rib
from .util import debug
from .util import user_path
from .util import get_real_path
from .util import readOSO
from .cycles_convert import *
from operator import attrgetter, itemgetter
import os.path
from time import sleep
import traceback
NODE_LAYOUT_SPLIT = 0.5
group_nodes = ['ShaderNodeGroup', 'NodeGroupInput', 'NodeGroupOutput']
def update_func(self, context):
node = self.node if hasattr(self, 'node') else self
from . import engine
if engine.is_ipr_running():
engine.ipr.issue_shader_edits(node=node)
class RendermanSocket:
ui_open: BoolProperty(name='UI Open', default=True)
def get_pretty_name(self, node):
if node.bl_idname in group_nodes:
return self.name
else:
return self.identifier
def get_value(self, node):
if node.bl_idname in group_nodes or not hasattr(node, self.name):
return self.default_value
else:
return getattr(node, self.name)
def draw_color(self, context, node):
return (0.25, 1.0, 0.25, 1.0)
def draw_value(self, context, layout, node):
layout.prop(node, self.identifier)
def draw(self, context, layout, node, text):
if self.is_linked or self.is_output or self.hide_value or not hasattr(self, 'default_value'):
layout.label(self.get_pretty_name(node))
elif node.bl_idname in group_nodes or node.bl_idname == "PxrOSLPatternNode":
layout.prop(self, 'default_value',
text=self.get_pretty_name(node), slider=True)
else:
layout.prop(node, self.name,
text=self.get_pretty_name(node), slider=True)
class RendermanSocketInterface:
def draw_color(self, context):
return (0.25, 1.0, 0.25, 1.0)
def draw(self, context, layout):
layout.label(self.name)
def from_socket(self, node, socket):
if hasattr(self, 'default_value'):
self.default_value = socket.get_value(node)
self.name = socket.name
def init_socket(self, node, socket, data_path):
sleep(.01)
socket.name = self.name
if hasattr(self, 'default_value'):
socket.default_value = self.default_value
class RendermanNodeSocketFloat(bpy.types.NodeSocketFloat, RendermanSocket):
bl_idname = 'RendermanNodeSocketFloat'
bl_label = 'RenderMan Float Socket'
default_value: FloatProperty(update=update_func)
renderman_type: StringProperty(default='float')
def draw_color(self, context, node):
return (0.5, 0.5, 0.5, 1.0)
class RendermanNodeSocketInterfaceFloat(bpy.types.NodeSocketInterfaceFloat, RendermanSocketInterface):
bl_idname = 'RendermanNodeSocketInterfaceFloat'
bl_label = 'RenderMan Float Socket'
bl_socket_idname = 'RendermanNodeSocketFloat'
default_value: FloatProperty()
def draw_color(self, context):
return (0.5, 0.5, 0.5, 1.0)
class RendermanNodeSocketInt(bpy.types.NodeSocketInt, RendermanSocket):
bl_idname = 'RendermanNodeSocketInt'
bl_label = 'RenderMan Int Socket'
default_value: IntProperty(update=update_func)
renderman_type: StringProperty(default='int')
def draw_color(self, context, node):
return (1.0, 1.0, 1.0, 1.0)
class RendermanNodeSocketInterfaceInt(bpy.types.NodeSocketInterfaceInt, RendermanSocketInterface):
bl_idname = 'RendermanNodeSocketInterfaceInt'
bl_label = 'RenderMan Int Socket'
bl_socket_idname = 'RendermanNodeSocketInt'
default_value: IntProperty()
def draw_color(self, context):
return (1.0, 1.0, 1.0, 1.0)
class RendermanNodeSocketString(bpy.types.NodeSocketString, RendermanSocket):
bl_idname = 'RendermanNodeSocketString'
bl_label = 'RenderMan String Socket'
default_value: StringProperty(update=update_func)
is_texture: BoolProperty(default=False)
renderman_type: StringProperty(default='string')
class RendermanNodeSocketStruct(bpy.types.NodeSocketString, RendermanSocket):
bl_idname = 'RendermanNodeSocketStruct'
bl_label = 'RenderMan Struct Socket'
hide_value = True
renderman_type = 'string'
default_value = ''
class RendermanNodeSocketInterfaceStruct(bpy.types.NodeSocketInterfaceString, RendermanSocketInterface):
bl_idname = 'RendermanNodeSocketInterfaceStruct'
bl_label = 'RenderMan Struct Socket'
bl_socket_idname = 'RendermanNodeSocketStruct'
hide_value = True
class RendermanNodeSocketColor(bpy.types.NodeSocketColor, RendermanSocket):
bl_idname = 'RendermanNodeSocketColor'
bl_label = 'RenderMan Color Socket'
default_value: FloatVectorProperty(size=3,
subtype="COLOR", update=update_func)
renderman_type: StringProperty(default='color')
def draw_color(self, context, node):
return (1.0, 1.0, .5, 1.0)
class RendermanNodeSocketInterfaceColor(bpy.types.NodeSocketInterfaceColor, RendermanSocketInterface):
bl_idname = 'RendermanNodeSocketInterfaceColor'
bl_label = 'RenderMan Color Socket'
bl_socket_idname = 'RendermanNodeSocketColor'
default_value: FloatVectorProperty(size=3,
subtype="COLOR")
def draw_color(self, context):
return (1.0, 1.0, .5, 1.0)
class RendermanNodeSocketVector(RendermanSocket, bpy.types.NodeSocketVector):
bl_idname = 'RendermanNodeSocketVector'
bl_label = 'RenderMan Vector Socket'
hide_value = True
default_value: FloatVectorProperty(size=3,
subtype="EULER", update=update_func)
renderman_type: StringProperty(default='vector')
def draw_color(self, context, node):
return (.25, .25, .75, 1.0)
class RendermanNodeSocketInterfaceVector(bpy.types.NodeSocketInterfaceVector, RendermanSocketInterface):
bl_idname = 'RendermanNodeSocketInterfaceVector'
bl_label = 'RenderMan Vector Socket'
bl_socket_idname = 'RendermanNodeSocketVector'
hide_value = True
default_value: FloatVectorProperty(size=3,
subtype="EULER")
def draw_color(self, context):
return (.25, .25, .75, 1.0)
class RendermanShaderSocket(bpy.types.NodeSocketShader, RendermanSocket):
bl_idname = 'RendermanShaderSocket'
bl_label = 'RenderMan Shader Socket'
hide_value = True
class RendermanShaderSocketInterface(bpy.types.NodeSocketInterfaceShader, RendermanSocketInterface):
bl_idname = 'RendermanShaderInterfaceSocket'
bl_label = 'RenderMan Shader Socket'
bl_socket_idname = 'RendermanShaderSocket'
hide_value = True
class RendermanShadingNode(bpy.types.ShaderNode):
bl_label = 'Output'
def update_mat(self, mat):
if self.renderman_node_type == 'bxdf' and self.outputs['Bxdf'].is_linked:
mat.specular_color = [1, 1, 1]
mat.diffuse_color = [1, 1, 1]
mat.use_transparency = False
mat.specular_intensity = 0
mat.diffuse_intensity = 1
if hasattr(self, "baseColor"):
mat.diffuse_color = self.baseColor
elif hasattr(self, "emitColor"):
mat.diffuse_color = self.emitColor
elif hasattr(self, "diffuseColor"):
mat.diffuse_color = self.diffuseColor
elif hasattr(self, "midColor"):
mat.diffuse_color = self.midColor
elif hasattr(self, "transmissionColor"):
mat.diffuse_color = self.transmissionColor
elif hasattr(self, "frontColor"):
mat.diffuse_color = self.frontColor
if hasattr(self, "specular"):
mat.specular_intensity = self.specular
elif hasattr(self, "SpecularGainR"):
mat.specular_intensity = self.specularGainR
elif hasattr(self, "reflectionGain"):
mat.specular_intensity = self.reflectionGain
if hasattr(self, "specularColor"):
mat.specular_color = self.specularColor
elif hasattr(self, "reflectionColor"):
mat.specular_color = self.reflectionColor
if self.bl_idname in ["PxrGlassBxdfNode", "PxrLMGlassBxdfNode"]:
mat.use_transparency = True
mat.alpha = .5
if self.bl_idname == "PxrLMMetalBxdfNode":
mat.diffuse_color = [0, 0, 0]
mat.specular_intensity = 1
mat.specular_color = self.specularColor
mat.mirror_color = [1, 1, 1]
elif self.bl_idname == "PxrLMPlasticBxdfNode":
mat.specular_intensity = 1
def draw_buttons(self, context, layout):
self.draw_nonconnectable_props(context, layout, self.prop_names)
if self.bl_idname == "PxrOSLPatternNode":
layout.operator("node.refresh_osl_shader")
def draw_buttons_ext(self, context, layout):
self.draw_nonconnectable_props(context, layout, self.prop_names)
def draw_nonconnectable_props(self, context, layout, prop_names):
if self.bl_idname in ['PxrLayerPatternNode', 'PxrSurfaceBxdfNode']:
col = layout.column(align=True)
for prop_name in prop_names:
if prop_name not in self.inputs:
for name in getattr(self, prop_name):
if name.startswith('enable'):
col.prop(self, name, text=prop_name.split('.')[-1])
break
return
if self.bl_idname == "PxrOSLPatternNode" or self.bl_idname == "PxrSeExprPatternNode":
prop = getattr(self, "codetypeswitch")
layout.prop(self, "codetypeswitch")
if getattr(self, "codetypeswitch") == 'INT':
prop = getattr(self, "internalSearch")
layout.prop_search(
self, "internalSearch", bpy.data, "texts", text="")
elif getattr(self, "codetypeswitch") == 'EXT':
prop = getattr(self, "shadercode")
layout.prop(self, "shadercode")
elif getattr(self, "codetypeswitch") == 'NODE':
layout.prop(self, "expression")
else:
if self.__annotations__['plugin_name'] == 'PxrRamp':
nt = bpy.data.node_groups[self.node_group]
if nt:
layout.template_color_ramp(
nt.nodes["ColorRamp"], 'color_ramp')
for prop_name in prop_names:
prop_meta = self.prop_meta[prop_name]
if 'widget' in prop_meta and prop_meta['widget'] == 'null' or \
'hidden' in prop_meta and prop_meta['hidden']:
continue
if prop_name not in self.inputs:
if prop_meta['renderman_type'] == 'page':
ui_prop = prop_name + "_uio"
ui_open = getattr(self, ui_prop)
icon = 'DISCLOSURE_TRI_DOWN' if ui_open \
else 'DISCLOSURE_TRI_RIGHT'
split = layout.split(NODE_LAYOUT_SPLIT)
row = split.row()
row.prop(self, ui_prop, icon=icon, text='',
icon_only=True, emboss=False, slider=True)
row.label(prop_name.split('.')[-1] + ':')
if ui_open:
prop = getattr(self, prop_name)
self.draw_nonconnectable_props(
context, layout, prop)
elif "Subset" in prop_name and prop_meta['type'] == 'string':
layout.prop_search(self, prop_name, bpy.data.scenes[0].renderman,
"object_groups")
else:
layout.prop(self, prop_name, slider=True)
def copy(self, node):
pass
def RefreshNodes(self, context, nodeOR=None, materialOverride=None):
if hasattr(context, "node"):
node = context.node
else:
node = nodeOR
prefs = bpy.context.preferences.addons[__package__].preferences
out_path = user_path(prefs.env_vars.out)
compile_path = os.path.join(user_path(prefs.env_vars.out), "shaders")
if os.path.exists(out_path):
pass
else:
os.mkdir(out_path)
if os.path.exists(os.path.join(out_path, "shaders")):
pass
else:
os.mkdir(os.path.join(out_path, "shaders"))
if getattr(node, "codetypeswitch") == "EXT":
osl_path = user_path(getattr(node, 'shadercode'))
FileName = os.path.basename(osl_path)
FileNameNoEXT = os.path.splitext(FileName)[0]
FileNameOSO = FileNameNoEXT
FileNameOSO += ".oso"
export_path = os.path.join(
user_path(prefs.env_vars.out), "shaders", FileNameOSO)
if os.path.splitext(FileName)[1] == ".oso":
out_file = os.path.join(user_path(prefs.env_vars.out), "shaders", FileNameOSO)
if not os.path.exists(out_file) or not os.path.samefile(osl_path, out_file):
shutil.copy(osl_path, out_file)
ok = True
else:
ok = node.compile_osl(osl_path, compile_path)
elif getattr(node, "codetypeswitch") == "INT" and node.internalSearch:
script = bpy.data.texts[node.internalSearch]
osl_path = bpy.path.abspath(
script.filepath, library=script.library)
if script.is_in_memory or script.is_dirty or \
script.is_modified or not os.path.exists(osl_path):
osl_file = tempfile.NamedTemporaryFile(
mode='w', suffix=".osl", delete=False)
osl_file.write(script.as_string())
osl_file.close()
FileNameNoEXT = os.path.splitext(script.name)[0]
FileNameOSO = FileNameNoEXT
FileNameOSO += ".oso"
node.__annotations__['plugin_name'] = FileNameNoEXT
ok = node.compile_osl(osl_file.name, compile_path, script.name)
export_path = os.path.join(
user_path(prefs.env_vars.out), "shaders", FileNameOSO)
os.remove(osl_file.name)
else:
ok = node.compile_osl(osl_path, compile_path)
FileName = os.path.basename(osl_path)
FileNameNoEXT = os.path.splitext(FileName)[0]
node.__annotations__['plugin_name'] = FileNameNoEXT
FileNameOSO = FileNameNoEXT
FileNameOSO += ".oso"
export_path = os.path.join(
user_path(prefs.env_vars.out), "shaders", FileNameOSO)
else:
ok = False
debug("osl", "Shader cannot be compiled. Shader name not specified")
if ok:
debug('osl', "Shader Compiled Successfully!")
node.outputs.clear()
node.inputs.clear()
prop_names, shader_meta = readOSO(export_path)
debug('osl', prop_names, "MetaInfo: ", shader_meta)
node.label = shader_meta["shader"]
node.__annotations__['plugin_name'] = shader_meta["shader"]
setattr(node, 'shader_meta', shader_meta)
node.setOslProps(prop_names, shader_meta)
else:
debug("osl", "NODE COMPILATION FAILED")
def compile_osl(self, inFile, outPath, nameOverride=""):
if not nameOverride:
FileName = os.path.basename(inFile)
FileNameNoEXT = os.path.splitext(FileName)[0]
out_file = os.path.join(outPath, FileNameNoEXT)
out_file += ".oso"
else:
FileNameNoEXT = os.path.splitext(nameOverride)[0]
out_file = os.path.join(outPath, FileNameNoEXT)
out_file += ".oso"
ok = _cycles.osl_compile(inFile, out_file)
return ok
def update(self):
debug("info", "UPDATING: ", self.name)
@classmethod
def poll(cls, ntree):
if hasattr(ntree, 'bl_idname'):
return ntree.bl_idname == 'ShaderNodeTree'
else:
return True
def setOslProps(self, prop_names, shader_meta):
for prop_name in prop_names:
prop_type = shader_meta[prop_name]["type"]
if shader_meta[prop_name]["IO"] == "out":
self.outputs.new(
socket_map[prop_type], prop_name)
else:
prop_default = shader_meta[prop_name]["default"]
if prop_type == "float":
prop_default = float(prop_default)
elif prop_type == "int":
prop_default = int(float(prop_default))
if prop_type == "matrix":
self.inputs.new(socket_map["struct"], prop_name, prop_name)
elif prop_type == "void":
pass
elif 'lockgeom' in shader_meta[prop_name] and shader_meta[prop_name]['lockgeom'] == 0:
pass
else:
input = self.inputs.new(socket_map[shader_meta[prop_name]["type"]],
prop_name, prop_name)
input.default_value = prop_default
if prop_type == 'struct' or prop_type == 'point':
input.hide_value = True
input.renderman_type = prop_type
debug('osl', "Shader: ", shader_meta["shader"], "Properties: ",
prop_names, "Shader meta data: ", shader_meta)
compileLocation = self.name + "Compile"
class RendermanOutputNode(RendermanShadingNode):
bl_label = 'RenderMan Material'
renderman_node_type = 'output'
bl_icon = 'MATERIAL'
node_tree = None
def init(self, context):
input = self.inputs.new('RendermanShaderSocket', 'Bxdf')
input.type = 'SHADER'
input.hide_value = True
input = self.inputs.new('RendermanShaderSocket', 'Light')
input.hide_value = True
input = self.inputs.new('RendermanShaderSocket', 'Displacement')
input.hide_value = True
def draw_buttons(self, context, layout):
return
def draw_buttons_ext(self, context, layout):
return
# updates
def update(self):
from . import engine
if engine.is_ipr_running():
engine.ipr.last_edit_mat = None
engine.ipr.issue_shader_edits(nt=self.id_data)
# Final output node, used as a dummy to find top level shaders
class RendermanBxdfNode(RendermanShadingNode):
bl_label = 'Bxdf'
renderman_node_type = 'bxdf'
shading_compatibility = {'NEW_SHADING'}
class RendermanDisplacementNode(RendermanShadingNode):
bl_label = 'Displacement'
renderman_node_type = 'displacement'
# Final output node, used as a dummy to find top level shaders
class RendermanPatternNode(RendermanShadingNode):
bl_label = 'Texture'
renderman_node_type = 'pattern'
bl_type = 'TEX_IMAGE'
bl_static_type = 'TEX_IMAGE'
class RendermanLightNode(RendermanShadingNode):
bl_label = 'Output'
renderman_node_type = 'light'
# Generate dynamic types
def generate_node_type(prefs, name, args):
nodeType = args.find("shaderType/tag").attrib['value']
typename = '%s%sNode' % (name, nodeType.capitalize())
nodeDict = {'bxdf': RendermanBxdfNode,
'pattern': RendermanPatternNode,
'displacement': RendermanDisplacementNode,
'light': RendermanLightNode}
if nodeType not in nodeDict.keys():
return
ntype = type(typename, (nodeDict[nodeType],), {'__annotations__': {}})
ntype.bl_label = name
ntype.typename = typename
inputs = [p for p in args.findall('./param')] + \
[p for p in args.findall('./page')]
outputs = [p for p in args.findall('.//output')]
def init(self, context):
if self.renderman_node_type == 'bxdf':
self.outputs.new('RendermanShaderSocket', "Bxdf").type = 'SHADER'
#socket_template = self.socket_templates.new(identifier='Bxdf', name='Bxdf', type='SHADER')
node_add_inputs(self, name, self.prop_names)
node_add_outputs(self)
# if this is PxrLayerSurface set the diffusegain to 0. The default
# of 1 is unintuitive
if self.__annotations__['plugin_name'] == 'PxrLayerSurface':
self.diffuseGain = 0
elif self.renderman_node_type == 'light':
# only make a few sockets connectable
node_add_inputs(self, name, self.prop_names)
self.outputs.new('RendermanShaderSocket', "Light")
elif self.renderman_node_type == 'displacement':
# only make the color connectable
self.outputs.new('RendermanShaderSocket', "Displacement")
node_add_inputs(self, name, self.prop_names)
# else pattern
elif name == "PxrOSL":
self.outputs.clear()
else:
node_add_inputs(self, name, self.prop_names)
node_add_outputs(self)
if name == "PxrRamp":
node_group = bpy.data.node_groups.new(
'PxrRamp_nodegroup', 'ShaderNodeTree')
node_group.nodes.new('ShaderNodeValToRGB')
node_group.use_fake_user = True
self.node_group = node_group.name
update_conditional_visops(self)
def free(self):
if name == "PxrRamp":
bpy.data.node_groups.remove(bpy.data.node_groups[self.node_group])
ntype.init = init
ntype.free = free
if name == 'PxrRamp':
ntype.node_group = StringProperty('color_ramp', default='')
ntype.__annotations__["plugin_name"] = StringProperty(name='Plugin Name',
default=name, options={'HIDDEN'})
# lights cant connect to a node tree in 20.0
class_generate_properties(ntype, name, inputs + outputs)
if nodeType == 'light':
ntype.light_shading_rate = FloatProperty(
name="Light Shading Rate",
description="Shading Rate for this light. \
Leave this high unless detail is missing",
default=100.0)
ntype.light_primary_visibility = BoolProperty(
name="Light Primary Visibility",
description="Camera visibility for this light",
default=True)
bpy.utils.register_class(ntype)
return typename, ntype
# UI
def find_node_input(node, name):
for input in node.inputs:
if input.name == name:
return input
return None
def find_node(material, nodetype):
if material and material.node_tree:
ntree = material.node_tree
active_output_node = None
for node in ntree.nodes:
if getattr(node, "bl_idname", None) == nodetype:
if getattr(node, "is_active_output", True):
return node
if not active_output_node:
active_output_node = node
return active_output_node
return None
def find_node_input(node, name):
for input in node.inputs:
if input.name == name:
return input
return None
def panel_node_draw(layout, context, id_data, output_type, input_name):
ntree = id_data.node_tree
node = find_node(id_data, output_type)
if not node:
layout.label(text="No output node")
else:
input = find_node_input(node, input_name)
#layout.template_node_view(ntree, node, input)
draw_nodes_properties_ui(layout, context, ntree)
return True
def is_renderman_nodetree(material):
return find_node(material, 'RendermanOutputNode')
def draw_nodes_properties_ui(layout, context, nt, input_name='Bxdf',
output_node_type="output"):
output_node = next((n for n in nt.nodes
if hasattr(n, 'renderman_node_type') and n.renderman_node_type == output_node_type), None)
if output_node is None:
return
socket = output_node.inputs[input_name]
node = socket_node_input(nt, socket)
layout.context_pointer_set("nodetree", nt)
layout.context_pointer_set("node", output_node)
layout.context_pointer_set("socket", socket)
split = layout.split(0.35)
split.label(socket.name + ':')
if socket.is_linked:
# for lights draw the shading rate ui.
split.operator_menu_enum("node.add_%s" % input_name.lower(),
"node_type", text=node.bl_label)
else:
split.operator_menu_enum("node.add_%s" % input_name.lower(),
"node_type", text='None')
if node is not None:
draw_node_properties_recursive(layout, context, nt, node)
def socket_node_input(nt, socket):
return next((l.from_node for l in nt.links if l.to_socket == socket), None)
def socket_socket_input(nt, socket):
return next((l.from_socket for l in nt.links if l.to_socket == socket and socket.is_linked),
None)
def linked_sockets(sockets):
if sockets is None:
return []
return [i for i in sockets if i.is_linked]
def draw_node_properties_recursive(layout, context, nt, node, level=0):
def indented_label(layout, label, level):
for i in range(level):
layout.label('', icon='BLANK1')
if label:
layout.label(label)
layout.context_pointer_set("node", node)
layout.context_pointer_set("nodetree", nt)
def draw_props(prop_names, layout, level):
for prop_name in prop_names:
# skip showing the shape for PxrStdAreaLight
if prop_name in ["lightGroup", "rman__Shape", "coneAngle", "penumbraAngle"]:
continue
if prop_name == "codetypeswitch":
row = layout.row()
if node.codetypeswitch == 'INT':
row.prop_search(node, "internalSearch",
bpy.data, "texts", text="")
elif node.codetypeswitch == 'EXT':
row.prop(node, "shadercode")
elif prop_name == "internalSearch" or prop_name == "shadercode" or prop_name == "expression":
pass
else:
prop_meta = node.prop_meta[prop_name]
prop = getattr(node, prop_name)
if 'widget' in prop_meta and prop_meta['widget'] == 'null' or \
'hidden' in prop_meta and prop_meta['hidden']:
continue
# else check if the socket with this name is connected
socket = node.inputs[prop_name] if prop_name in node.inputs \
else None
layout.context_pointer_set("socket", socket)
if socket and socket.is_linked:
input_node = socket_node_input(nt, socket)
icon = 'DISCLOSURE_TRI_DOWN' if socket.ui_open \
else 'DISCLOSURE_TRI_RIGHT'
split = layout.split(NODE_LAYOUT_SPLIT)
row = split.row()
indented_label(row, None, level)
row.prop(socket, "ui_open", icon=icon, text='',
icon_only=True, emboss=False)
label = prop_meta.get('label', prop_name)
row.label(label + ':')
if ('type' in prop_meta and prop_meta['type'] == 'vstruct') or prop_name == 'inputMaterial':
split.operator_menu_enum("node.add_layer", "node_type",
text=input_node.bl_label, icon="LAYER_USED")
elif prop_meta['renderman_type'] == 'struct':
split.operator_menu_enum("node.add_manifold", "node_type",
text=input_node.bl_label, icon="LAYER_USED")
elif prop_meta['renderman_type'] == 'normal':
split.operator_menu_enum("node.add_bump", "node_type",
text=input_node.bl_label, icon="LAYER_USED")
else:
split.operator_menu_enum("node.add_pattern", "node_type",
text=input_node.bl_label, icon="LAYER_USED")
if socket.ui_open:
draw_node_properties_recursive(layout, context, nt,
input_node, level=level + 1)
else:
row = layout.row(align=True)
if prop_meta['renderman_type'] == 'page':
ui_prop = prop_name + "_uio"
ui_open = getattr(node, ui_prop)
icon = 'DISCLOSURE_TRI_DOWN' if ui_open \
else 'DISCLOSURE_TRI_RIGHT'
split = layout.split(NODE_LAYOUT_SPLIT)
row = split.row()
for i in range(level):
row.label('', icon='BLANK1')
row.prop(node, ui_prop, icon=icon, text='',
icon_only=True, emboss=False)
sub_prop_names = list(prop)
if node.bl_idname in {"PxrSurfaceBxdfNode", "PxrLayerPatternNode"}:
for pn in sub_prop_names:
if pn.startswith('enable'):
row.prop(node, pn, text='')
sub_prop_names.remove(pn)
break
row.label(prop_name.split('.')[-1] + ':')
if ui_open:
draw_props(sub_prop_names, layout, level + 1)
else:
indented_label(row, None, level)
# indented_label(row, socket.name+':')
# don't draw prop for struct type
if "Subset" in prop_name and prop_meta['type'] == 'string':
row.prop_search(node, prop_name, bpy.data.scenes[0].renderman,
"object_groups")
else:
if prop_meta['renderman_type'] != 'struct':
row.prop(node, prop_name, slider=True)
else:
row.label(prop_meta['label'])
if prop_name in node.inputs:
if ('type' in prop_meta and prop_meta['type'] == 'vstruct') or prop_name == 'inputMaterial':
row.operator_menu_enum("node.add_layer", "node_type",
text='', icon="LAYER_USED")
elif prop_meta['renderman_type'] == 'struct':
row.operator_menu_enum("node.add_manifold", "node_type",
text='', icon="LAYER_USED")
elif prop_meta['renderman_type'] == 'normal':
row.operator_menu_enum("node.add_bump", "node_type",
text='', icon="LAYER_USED")
else:
row.operator_menu_enum("node.add_pattern", "node_type",
text='', icon="LAYER_USED")
    if not hasattr(node, '__annotations__') or "plugin_name" not in node.__annotations__ or node.bl_idname == 'PxrOSLPatternNode':
node.draw_buttons(context, layout)
for input in node.inputs:
if input.is_linked:
input_node = socket_node_input(nt, input)
icon = 'DISCLOSURE_TRI_DOWN' if input.show_expanded \
else 'DISCLOSURE_TRI_RIGHT'
split = layout.split(NODE_LAYOUT_SPLIT)
row = split.row()
indented_label(row, None, level)
row.prop(input, "show_expanded", icon=icon, text='',
icon_only=True, emboss=False)
row.label(input.name + ':')
split.operator_menu_enum("node.add_pattern", "node_type",
text=input_node.bl_label, icon="LAYER_USED")
if input.show_expanded:
draw_node_properties_recursive(layout, context, nt,
input_node, level=level + 1)
else:
row = layout.row(align=True)
indented_label(row, None, level)
if input.hide_value:
row.label(input.name)
else:
row.prop(input, 'default_value',
slider=True, text=input.name)
row.operator_menu_enum("node.add_pattern", "node_type",
text='', icon="LAYER_USED")
else:
if node.plugin_name == 'PxrRamp':
dummy_nt = bpy.data.node_groups[node.node_group]
if dummy_nt:
layout.template_color_ramp(
dummy_nt.nodes['ColorRamp'], 'color_ramp')
draw_props(node.prop_names, layout, level)
layout.separator()
# Operators
# connect the pattern nodes in some sensible manner (color output to color input etc)
# TODO more robust
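# e.g. a color/vector input prefers an upstream 'resultRGB' output (falling back to the first
# color output), struct inputs look for 'pxrMaterialOut' then 'result', and anything else falls
# back to 'resultF' or the first float output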
def link_node(nt, from_node, in_socket):
out_socket = None
# first look for resultF/resultRGB
if type(in_socket).__name__ in ['RendermanNodeSocketColor',
'RendermanNodeSocketVector']:
out_socket = from_node.outputs.get('resultRGB',
next((s for s in from_node.outputs
if type(s).__name__ == 'RendermanNodeSocketColor'), None))
elif type(in_socket).__name__ == 'RendermanNodeSocketStruct':
out_socket = from_node.outputs.get('pxrMaterialOut', None)
if not out_socket:
out_socket = from_node.outputs.get('result', None)
else:
out_socket = from_node.outputs.get('resultF',
next((s for s in from_node.outputs
if type(s).__name__ == 'RendermanNodeSocketFloat'), None))
if out_socket:
nt.links.new(out_socket, in_socket)
# base class for operators that add a node to a socket
class Add_Node:
def get_type_items(self, context):
items = []
# if this is a pattern input do columns!
if self.input_type.lower() == 'pattern':
i = 0
for pattern_cat, patterns in pattern_categories.items():
if pattern_cat.lower() in ['layer', 'script', 'manifold', 'bump', 'displace']:
continue
items.append(('', pattern_cat, pattern_cat, '', 0))
for nodename in sorted(patterns):
nodetype = patterns[nodename]
items.append((nodetype.typename, nodetype.bl_label,
nodetype.bl_label, '', i))
i += 1
items.append(('', '', '', '', 0))
items.append(('REMOVE', 'Remove',
'Remove the node connected to this socket', '', i + 1))
items.append(('DISCONNECT', 'Disconnect',
'Disconnect the node connected to this socket', '', i + 2))
elif self.input_type.lower() in ['layer', 'manifold', 'bump']:
patterns = pattern_categories[self.input_type]
for nodename in sorted(patterns):
nodetype = patterns[nodename]
items.append((nodetype.typename, nodetype.bl_label,
nodetype.bl_label))
items.append(('REMOVE', 'Remove',
'Remove the node connected to this socket'))
items.append(('DISCONNECT', 'Disconnect',
'Disconnect the node connected to this socket'))
else:
for nodetype in nodetypes.values():
if self.input_type.lower() == 'light' and nodetype.renderman_node_type == 'light':
if nodetype.__name__ == 'PxrMeshLightLightNode':
items.append((nodetype.typename, nodetype.bl_label,
nodetype.bl_label))
elif nodetype.renderman_node_type == self.input_type.lower():
items.append((nodetype.typename, nodetype.bl_label,
nodetype.bl_label))
items = sorted(items, key=itemgetter(1))
items.append(('REMOVE', 'Remove',
'Remove the node connected to this socket'))
items.append(('DISCONNECT', 'Disconnect',
'Disconnect the node connected to this socket'))
return items
node_type: EnumProperty(name="Node Type",
description='Node type to add to this socket',
items=get_type_items)
def execute(self, context):
new_type = self.properties.node_type
if new_type == 'DEFAULT':
return {'CANCELLED'}
nt = context.nodetree
node = context.node
socket = context.socket
input_node = socket_node_input(nt, socket)
if new_type == 'REMOVE':
nt.nodes.remove(input_node)
return {'FINISHED'}
if new_type == 'DISCONNECT':
link = next((l for l in nt.links if l.to_socket == socket), None)
nt.links.remove(link)
return {'FINISHED'}
# add a new node to existing socket
if input_node is None:
newnode = nt.nodes.new(new_type)
newnode.location = node.location
newnode.location[0] -= 300
newnode.selected = False
if self.input_type in ['Pattern', 'Layer', 'Manifold', 'Bump']:
link_node(nt, newnode, socket)
else:
nt.links.new(newnode.outputs[self.input_type], socket)
# replace input node with a new one
else:
newnode = nt.nodes.new(new_type)
input = socket
old_node = input.links[0].from_node
if self.input_type == 'Pattern':
link_node(nt, newnode, socket)
else:
nt.links.new(newnode.outputs[self.input_type], socket)
newnode.location = old_node.location
active_material = context.active_object.active_material
newnode.update_mat(active_material)
nt.nodes.remove(old_node)
return {'FINISHED'}
class NODE_OT_add_bxdf(bpy.types.Operator, Add_Node):
bl_idname = 'node.add_bxdf'
bl_label = 'Add Bxdf Node'
bl_description = 'Connect a Bxdf to this socket'
input_type: StringProperty(default='Bxdf')
class NODE_OT_add_displacement(bpy.types.Operator, Add_Node):
bl_idname = 'node.add_displacement'
bl_label = 'Add Displacement Node'
bl_description = 'Connect a Displacement shader to this socket'
input_type: StringProperty(default='Displacement')
class NODE_OT_add_light(bpy.types.Operator, Add_Node):
bl_idname = 'node.add_light'
bl_label = 'Add Light Node'
bl_description = 'Connect a Light shader to this socket'
input_type: StringProperty(default='Light')
class NODE_OT_add_pattern(bpy.types.Operator, Add_Node):
bl_idname = 'node.add_pattern'
bl_label = 'Add Pattern Node'
bl_description = 'Connect a Pattern to this socket'
input_type: StringProperty(default='Pattern')
class NODE_OT_add_layer(bpy.types.Operator, Add_Node):
bl_idname = 'node.add_layer'
bl_label = 'Add Layer Node'
bl_description = 'Connect a PxrLayer'
input_type: StringProperty(default='Layer')
class NODE_OT_add_manifold(bpy.types.Operator, Add_Node):
bl_idname = 'node.add_manifold'
bl_label = 'Add Manifold Node'
bl_description = 'Connect a Manifold'
input_type: StringProperty(default='Manifold')
class NODE_OT_add_bump(bpy.types.Operator, Add_Node):
bl_idname = 'node.add_bump'
bl_label = 'Add Bump Node'
bl_description = 'Connect a bump node'
input_type: StringProperty(default='Bump')
# return True if this param has a vstruct connection or is linked independently
def is_vstruct_or_linked(node, param):
meta = node.prop_meta[param]
if 'vstructmember' not in meta.keys():
return node.inputs[param].is_linked
elif param in node.inputs and node.inputs[param].is_linked:
return True
else:
vstruct_name, vstruct_member = meta['vstructmember'].split('.')
if node.inputs[vstruct_name].is_linked:
from_socket = node.inputs[vstruct_name].links[0].from_socket
vstruct_from_param = "%s_%s" % (
from_socket.identifier, vstruct_member)
return vstruct_conditional(from_socket.node, vstruct_from_param)
else:
return False
# tells if this param has a vstruct connection that is linked and whose
# conditional is met
def is_vstruct_and_linked(node, param):
meta = node.prop_meta[param]
if 'vstructmember' not in meta.keys():
return False
else:
vstruct_name, vstruct_member = meta['vstructmember'].split('.')
if node.inputs[vstruct_name].is_linked:
from_socket = node.inputs[vstruct_name].links[0].from_socket
# if coming from a shader group hookup across that
if from_socket.node.bl_idname == 'ShaderNodeGroup':
ng = from_socket.node.node_tree
group_output = next((n for n in ng.nodes if n.bl_idname == 'NodeGroupOutput'),
None)
if group_output is None:
return False
in_sock = group_output.inputs[from_socket.name]
if len(in_sock.links):
from_socket = in_sock.links[0].from_socket
vstruct_from_param = "%s_%s" % (
from_socket.identifier, vstruct_member)
return vstruct_conditional(from_socket.node, vstruct_from_param)
else:
return False
# gets the value for a node walking up the vstruct chain
def get_val_vstruct(node, param):
if param in node.inputs and node.inputs[param].is_linked:
from_socket = node.inputs[param].links[0].from_socket
return get_val_vstruct(from_socket.node, from_socket.identifier)
elif is_vstruct_and_linked(node, param):
return True
else:
return getattr(node, param)
# parse a vstruct conditional string and return true or false if should link
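# an illustrative (hypothetical) args-file expression is "connect if bumpNormal is connected else set 0";
# the tokens are rewritten below into is_vstruct_or_linked()/get_val_vstruct() calls and then eval'd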
def vstruct_conditional(node, param):
if not hasattr(node, 'shader_meta') and not hasattr(node, 'output_meta'):
return False
meta = getattr(
node, 'shader_meta') if node.bl_idname == "PxrOSLPatternNode" else node.output_meta
if param not in meta:
return False
meta = meta[param]
if 'vstructConditionalExpr' not in meta.keys():
return True
expr = meta['vstructConditionalExpr']
expr = expr.replace('connect if ', '')
set_zero = False
if ' else set 0' in expr:
expr = expr.replace(' else set 0', '')
set_zero = True
tokens = expr.split()
new_tokens = []
i = 0
num_tokens = len(tokens)
while i < num_tokens:
token = tokens[i]
prepend, append = '', ''
while token[0] == '(':
token = token[1:]
prepend += '('
while token[-1] == ')':
token = token[:-1]
append += ')'
if token == 'set':
i += 1
continue
        # rewrite "<param> is connected" into an is_vstruct_or_linked() check
if i < num_tokens - 2 and tokens[i + 1] == 'is'\
and 'connected' in tokens[i + 2]:
token = "is_vstruct_or_linked(node, '%s')" % token
last_token = tokens[i + 2]
while last_token[-1] == ')':
last_token = last_token[:-1]
append += ')'
i += 3
else:
i += 1
if hasattr(node, token):
token = "get_val_vstruct(node, '%s')" % token
new_tokens.append(prepend + token + append)
if 'if' in new_tokens and 'else' not in new_tokens:
new_tokens.extend(['else', 'False'])
return eval(" ".join(new_tokens))
# Rib export
gains_to_enable = {
'diffuseGain': 'enableDiffuse',
'specularFaceColor': 'enablePrimarySpecular',
'specularEdgeColor': 'enablePrimarySpecular',
'roughSpecularFaceColor': 'enableRoughSpecular',
'roughSpecularEdgeColor': 'enableRoughSpecular',
'clearcoatFaceColor': 'enableClearCoat',
'clearcoatEdgeColor': 'enableClearCoat',
'iridescenceFaceGain': 'enableIridescence',
'iridescenceEdgeGain': 'enableIridescence',
'fuzzGain': 'enableFuzz',
'subsurfaceGain': 'enableSubsurface',
'singlescatterGain': 'enableSingleScatter',
'singlescatterDirectGain': 'enableSingleScatter',
'refractionGain': 'enableGlass',
'reflectionGain': 'enableGlass',
'glowGain': 'enableGlow',
}
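# used by gen_params(): when a PxrSurface lobe's enable switch is off, the matching gain
# parameter is forced to 0 (or [0, 0, 0] for colors) instead of its UI value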
# generate param list
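# returns a dict keyed by RIB-style declarations, roughly {'float roughness': 0.2,
# 'reference color baseColor': ['MyMat.PxrTexture:resultRGB']} (names and values here are illustrative)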
def gen_params(ri, node, mat_name=None):
params = {}
# If node is OSL node get properties from dynamic location.
if node.bl_idname == "PxrOSLPatternNode":
if getattr(node, "codetypeswitch") == "EXT":
prefs = bpy.context.preferences.addons[__package__].preferences
osl_path = user_path(getattr(node, 'shadercode'))
FileName = os.path.basename(osl_path)
            FileNameNoEXT, ext = os.path.splitext(FileName)
out_file = os.path.join(
user_path(prefs.env_vars.out), "shaders", FileName)
if ext == ".oso":
if not os.path.exists(out_file) or not os.path.samefile(osl_path, out_file):
if not os.path.exists(os.path.join(user_path(prefs.env_vars.out), "shaders")):
os.mkdir(os.path.join(user_path(prefs.env_vars.out), "shaders"))
shutil.copy(osl_path, out_file)
for input_name, input in node.inputs.items():
prop_type = input.renderman_type
if input.is_linked:
to_socket = input
from_socket = input.links[0].from_socket
params['reference %s %s' % (prop_type, input_name)] = \
[get_output_param_str(
from_socket.node, mat_name, from_socket, to_socket)]
elif type(input) != RendermanNodeSocketStruct:
params['%s %s' % (prop_type, input_name)] = \
rib(input.default_value,
type_hint=prop_type)
    # Special case for SeExpr nodes: the expression may live in a text datablock or an
    # external file, so it needs to be extracted before export.
elif node.bl_idname == "PxrSeExprPatternNode":
fileInputType = node.codetypeswitch
for prop_name, meta in node.prop_meta.items():
if prop_name in ["codetypeswitch", 'filename']:
pass
elif prop_name == "internalSearch" and fileInputType == 'INT':
if node.internalSearch != "":
script = bpy.data.texts[node.internalSearch]
params['%s %s' % ("string",
"expression")] = \
rib(script.as_string(),
type_hint=meta['renderman_type'])
elif prop_name == "shadercode" and fileInputType == "NODE":
params['%s %s' % ("string", "expression")] = node.expression
else:
prop = getattr(node, prop_name)
# if input socket is linked reference that
if prop_name in node.inputs and \
node.inputs[prop_name].is_linked:
to_socket = node.inputs[prop_name]
from_socket = to_socket.links[0].from_socket
params['reference %s %s' % (meta['renderman_type'],
meta['renderman_name'])] = \
[get_output_param_str(
from_socket.node, mat_name, from_socket, to_socket)]
# else output rib
else:
params['%s %s' % (meta['renderman_type'],
meta['renderman_name'])] = \
rib(prop, type_hint=meta['renderman_type'])
else:
for prop_name, meta in node.prop_meta.items():
if prop_name in txmake_options.index:
pass
elif node.__annotations__["plugin_name"] == 'PxrRamp' and prop_name in ['colors', 'positions']:
pass
elif(prop_name in ['sblur', 'tblur', 'notes']):
pass
else:
prop = getattr(node, prop_name)
# if property group recurse
if meta['renderman_type'] == 'page':
continue
elif prop_name == 'inputMaterial' or \
('type' in meta and meta['type'] == 'vstruct'):
continue
# if input socket is linked reference that
elif hasattr(node, 'inputs') and prop_name in node.inputs and \
node.inputs[prop_name].is_linked:
to_socket = node.inputs[prop_name]
from_socket = to_socket.links[0].from_socket
from_node = to_socket.links[0].from_node
if 'arraySize' in meta:
params['reference %s[1] %s' % (meta['renderman_type'],
meta['renderman_name'])] \
= [get_output_param_str(
from_node, mat_name, from_socket, to_socket)]
else:
params['reference %s %s' % (meta['renderman_type'],
meta['renderman_name'])] = \
[get_output_param_str(
from_node, mat_name, from_socket, to_socket)]
# see if vstruct linked
elif is_vstruct_and_linked(node, prop_name):
vstruct_name, vstruct_member = meta[
'vstructmember'].split('.')
from_socket = node.inputs[
vstruct_name].links[0].from_socket
temp_mat_name = mat_name
if from_socket.node.bl_idname == 'ShaderNodeGroup':
ng = from_socket.node.node_tree
group_output = next((n for n in ng.nodes if n.bl_idname == 'NodeGroupOutput'),
None)
if group_output is None:
return False
in_sock = group_output.inputs[from_socket.name]
if len(in_sock.links):
from_socket = in_sock.links[0].from_socket
temp_mat_name = mat_name + '.' + from_socket.node.name
vstruct_from_param = "%s_%s" % (
from_socket.identifier, vstruct_member)
if vstruct_from_param in from_socket.node.output_meta:
actual_socket = from_socket.node.output_meta[
vstruct_from_param]
params['reference %s %s' % (meta['renderman_type'],
meta['renderman_name'])] = \
[get_output_param_str(
from_socket.node, temp_mat_name, actual_socket)]
else:
print('Warning! %s not found on %s' %
(vstruct_from_param, from_socket.node.name))
# else output rib
else:
# if struct is not linked continue
if meta['renderman_type'] in ['struct', 'enum']:
continue
# if this is a gain on PxrSurface and the lobe isn't
if node.bl_idname == 'PxrSurfaceBxdfNode' and \
prop_name in gains_to_enable and \
not getattr(node, gains_to_enable[prop_name]):
val = [0, 0, 0] if meta[
'renderman_type'] == 'color' else 0
params['%s %s' % (meta['renderman_type'],
meta['renderman_name'])] = val
elif 'options' in meta and meta['options'] == 'texture' \
and node.bl_idname != "PxrPtexturePatternNode" or \
('widget' in meta and meta['widget'] == 'assetIdInput' and prop_name != 'iesProfile'):
params['%s %s' % (meta['renderman_type'],
meta['renderman_name'])] = \
rib(get_tex_file_name(prop),
type_hint=meta['renderman_type'])
elif 'arraySize' in meta:
if type(prop) == int:
prop = [prop]
params['%s[%d] %s' % (meta['renderman_type'], len(prop),
meta['renderman_name'])] \
= rib(prop)
else:
params['%s %s' % (meta['renderman_type'],
meta['renderman_name'])] = \
rib(prop, type_hint=meta['renderman_type'])
if node.__annotations__["plugin_name"] == 'PxrRamp':
nt = bpy.data.node_groups[node.node_group]
if nt:
dummy_ramp = nt.nodes['ColorRamp']
colors = []
positions = []
positions.append(float(dummy_ramp.color_ramp.elements[0].position))
colors.extend(dummy_ramp.color_ramp.elements[0].color[:3])
for e in dummy_ramp.color_ramp.elements:
positions.append(float(e.position))
colors.extend(e.color[:3])
positions.append(
float(dummy_ramp.color_ramp.elements[-1].position))
colors.extend(dummy_ramp.color_ramp.elements[-1].color[:3])
params['color[%d] colors' % len(positions)] = colors
params['float[%d] positions' % len(positions)] = positions
return params
def create_rman_surface(nt, parent_node, input_index, node_type="PxrSurfaceBxdfNode"):
layer = nt.nodes.new(node_type)
nt.links.new(layer.outputs[0], parent_node.inputs[input_index])
setattr(layer, 'enableDiffuse', False)
layer.location = parent_node.location
layer.diffuseGain = 0
layer.location[0] -= 300
return layer
combine_nodes = ['ShaderNodeAddShader', 'ShaderNodeMixShader']
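# Cycles combinator nodes: Add/Mix shader nodes are converted into PxrLayerMixer / PxrLayer
# setups by convert_cycles_bsdf() below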
def convert_cycles_bsdf(nt, rman_parent, node, input_index):
if node.bl_idname in combine_nodes:
i = 0 if node.bl_idname == 'ShaderNodeAddShader' else 1
node1 = node.inputs[
0 + i].links[0].from_node if node.inputs[0 + i].is_linked else None
node2 = node.inputs[
1 + i].links[0].from_node if node.inputs[1 + i].is_linked else None
if not node1 and not node2:
return
elif not node1:
convert_cycles_bsdf(nt, rman_parent, node2, input_index)
elif not node2:
convert_cycles_bsdf(nt, rman_parent, node1, input_index)
# to make a mixer
elif node.bl_idname == 'ShaderNodeMixShader' or node1.bl_idname in combine_nodes \
or node2.bl_idname in combine_nodes or \
node1.bl_idname == 'ShaderNodeGroup' or node2.bl_idname == 'ShaderNodeGroup' \
or (bsdf_map[node1.bl_idname][0] == bsdf_map[node2.bl_idname][0]):
mixer = nt.nodes.new('PxrLayerMixerPatternNode')
# if parent is output make a pxr surface first
nt.links.new(mixer.outputs["pxrMaterialOut"],
rman_parent.inputs[input_index])
offset_node_location(rman_parent, mixer, node)
# set the layer masks
if node.bl_idname == 'ShaderNodeAddShader':
mixer.layer1Mask = .5
else:
convert_cycles_input(
nt, node.inputs['Fac'], mixer, 'layer1Mask')
# make a new node for each
convert_cycles_bsdf(nt, mixer, node1, 0)
convert_cycles_bsdf(nt, mixer, node2, 1)
        # this is a heterogeneous mix or add of two different bsdf types
else:
if rman_parent.__annotations__["plugin_name"] == 'PxrLayerMixer':
old_parent = rman_parent
rman_parent = create_rman_surface(nt, rman_parent, input_index,
'PxrLayerPatternNode')
offset_node_location(old_parent, rman_parent, node)
convert_cycles_bsdf(nt, rman_parent, node1, 0)
convert_cycles_bsdf(nt, rman_parent, node2, 1)
# else set lobe on parent
elif 'Bsdf' in node.bl_idname or node.bl_idname == 'ShaderNodeSubsurfaceScattering':
if rman_parent.__annotations__["plugin_name"] == 'PxrLayerMixer':
old_parent = rman_parent
rman_parent = create_rman_surface(nt, rman_parent, input_index,
'PxrLayerPatternNode')
offset_node_location(old_parent, rman_parent, node)
node_type = node.bl_idname
bsdf_map[node_type][1](nt, node, rman_parent)
# if we find an emission node, naively make it a meshlight
# note this will only make the last emission node the light
elif node.bl_idname == 'ShaderNodeEmission':
output = next((n for n in nt.nodes if hasattr(n, 'renderman_node_type') and
n.renderman_node_type == 'output'),
None)
meshlight = nt.nodes.new("PxrMeshLightLightNode")
nt.links.new(meshlight.outputs[0], output.inputs["Light"])
meshlight.location = output.location
meshlight.location[0] -= 300
convert_cycles_input(
nt, node.inputs['Strength'], meshlight, "intensity")
if node.inputs['Color'].is_linked:
convert_cycles_input(
nt, node.inputs['Color'], meshlight, "textureColor")
else:
setattr(meshlight, 'lightColor', node.inputs[
'Color'].default_value[:3])
else:
rman_node = convert_cycles_node(nt, node)
nt.links.new(rman_node.outputs[0], rman_parent.inputs[input_index])
def convert_cycles_displacement(nt, surface_node, displace_socket):
# for now just do bump
if displace_socket.is_linked:
bump = nt.nodes.new("PxrBumpPatternNode")
nt.links.new(bump.outputs[0], surface_node.inputs['bumpNormal'])
bump.location = surface_node.location
bump.location[0] -= 200
bump.location[1] -= 100
convert_cycles_input(nt, displace_socket, bump, "inputBump")
# return
# if displace_socket.is_linked:
# displace = nt.nodes.new("PxrDisplaceDisplacementNode")
# nt.links.new(displace.outputs[0], output_node.inputs['Displacement'])
# displace.location = output_node.location
# displace.location[0] -= 200
# displace.location[1] -= 100
# setattr(displace, 'dispAmount', .01)
# convert_cycles_input(nt, displace_socket, displace, "dispScalar")
# could make this more robust to shift the entire nodetree to below the
# bounds of the cycles nodetree
def set_output_node_location(nt, output_node, cycles_output):
output_node.location = cycles_output.location
output_node.location[1] -= 500
def offset_node_location(rman_parent, rman_node, cycles_node):
linked_socket = next((sock for sock in cycles_node.outputs if sock.is_linked),
None)
rman_node.location = rman_parent.location
if linked_socket:
rman_node.location += (cycles_node.location -
linked_socket.links[0].to_node.location)
def convert_cycles_nodetree(id, output_node, reporter):
# find base node
from . import cycles_convert
cycles_convert.converted_nodes = {}
nt = id.node_tree
reporter({'INFO'}, 'Converting material ' + id.name + ' to RenderMan')
cycles_output_node = find_node(id, 'ShaderNodeOutputMaterial')
if not cycles_output_node:
reporter({'WARNING'}, 'No Cycles output found ' + id.name)
return False
# if no bsdf return false
if not cycles_output_node.inputs[0].is_linked:
reporter({'WARNING'}, 'No Cycles bsdf found ' + id.name)
return False
# set the output node location
    set_output_node_location(nt, output_node, cycles_output_node)
# walk tree
cycles_convert.report = reporter
begin_cycles_node = cycles_output_node.inputs[0].links[0].from_node
# if this is an emission use PxrLightEmission
if begin_cycles_node.bl_idname == "ShaderNodeEmission":
meshlight = nt.nodes.new("PxrMeshLightLightNode")
nt.links.new(meshlight.outputs[0], output_node.inputs["Light"])
offset_node_location(output_node, meshlight, begin_cycles_node)
convert_cycles_input(nt, begin_cycles_node.inputs[
'Strength'], meshlight, "intensity")
if begin_cycles_node.inputs['Color'].is_linked:
convert_cycles_input(nt, begin_cycles_node.inputs[
'Color'], meshlight, "textureColor")
else:
setattr(meshlight, 'lightColor', begin_cycles_node.inputs[
'Color'].default_value[:3])
bxdf = nt.nodes.new('PxrBlackBxdfNode')
nt.links.new(bxdf.outputs[0], output_node.inputs["Bxdf"])
else:
base_surface = create_rman_surface(nt, output_node, 0)
offset_node_location(output_node, base_surface, begin_cycles_node)
convert_cycles_bsdf(nt, base_surface, begin_cycles_node, 0)
convert_cycles_displacement(
nt, base_surface, cycles_output_node.inputs[2])
return True
cycles_node_map = {
'ShaderNodeAttribute': 'node_attribute',
'ShaderNodeBlackbody': 'node_checker_blackbody',
'ShaderNodeTexBrick': 'node_brick_texture',
'ShaderNodeBrightContrast': 'node_brightness',
'ShaderNodeTexChecker': 'node_checker_texture',
'ShaderNodeBump': 'node_bump',
'ShaderNodeCameraData': 'node_camera',
'ShaderNodeTexChecker': 'node_checker_texture',
'ShaderNodeCombineHSV': 'node_combine_hsv',
'ShaderNodeCombineRGB': 'node_combine_rgb',
'ShaderNodeCombineXYZ': 'node_combine_xyz',
'ShaderNodeTexEnvironment': 'node_environment_texture',
'ShaderNodeFresnel': 'node_fresnel',
'ShaderNodeGamma': 'node_gamma',
'ShaderNodeNewGeometry': 'node_geometry',
'ShaderNodeTexGradient': 'node_gradient_texture',
'ShaderNodeHairInfo': 'node_hair_info',
'ShaderNodeInvert': 'node_invert',
'ShaderNodeHueSaturation': 'node_hsv',
'ShaderNodeTexImage': 'node_image_texture',
'ShaderNodeHueSaturation': 'node_hsv',
'ShaderNodeLayerWeight': 'node_layer_weight',
'ShaderNodeLightFalloff': 'node_light_falloff',
'ShaderNodeLightPath': 'node_light_path',
'ShaderNodeTexMagic': 'node_magic_texture',
'ShaderNodeMapping': 'node_mapping',
'ShaderNodeMath': 'node_math',
'ShaderNodeMixRGB': 'node_mix',
'ShaderNodeTexMusgrave': 'node_musgrave_texture',
'ShaderNodeTexNoise': 'node_noise_texture',
'ShaderNodeNormal': 'node_normal',
'ShaderNodeNormalMap': 'node_normal_map',
'ShaderNodeObjectInfo': 'node_object_info',
'ShaderNodeParticleInfo': 'node_particle_info',
'ShaderNodeRGBCurve': 'node_rgb_curves',
'ShaderNodeValToRGB': 'node_rgb_ramp',
'ShaderNodeSeparateHSV': 'node_separate_hsv',
'ShaderNodeSeparateRGB': 'node_separate_rgb',
'ShaderNodeSeparateXYZ': 'node_separate_xyz',
'ShaderNodeTexSky': 'node_sky_texture',
'ShaderNodeTangent': 'node_tangent',
'ShaderNodeTexCoord': 'node_texture_coordinate',
'ShaderNodeUVMap': 'node_uv_map',
'ShaderNodeValue': 'node_value',
'ShaderNodeVectorCurves': 'node_vector_curves',
'ShaderNodeVectorMath': 'node_vector_math',
'ShaderNodeVectorTransform': 'node_vector_transform',
'ShaderNodeTexVoronoi': 'node_voronoi_texture',
'ShaderNodeTexWave': 'node_wave_texture',
'ShaderNodeWavelength': 'node_wavelength',
'ShaderNodeWireframe': 'node_wireframe',
}
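# maps a Cycles shader node bl_idname to the RenderMan pattern plugin name emitted by
# translate_cycles_node(); nodes missing from this table are skipped with a console message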
def get_mat_name(mat_name):
return mat_name.replace(' ', '')
def get_node_name(node, mat_name):
return "%s.%s" % (mat_name, node.name.replace(' ', ''))
def get_socket_name(node, socket):
if type(socket) == dict:
return socket['name'].replace(' ', '')
# if this is a renderman node we can just use the socket name,
else:
        if not hasattr(node, '__annotations__') or 'plugin_name' not in node.__annotations__:
if socket.name in node.inputs and socket.name in node.outputs:
suffix = 'Out' if socket.is_output else 'In'
return socket.name.replace(' ', '') + suffix
return socket.identifier.replace(' ', '')
def get_socket_type(node, socket):
sock_type = socket.type.lower()
if sock_type == 'rgba':
return 'color'
elif sock_type == 'value':
return 'float'
elif sock_type == 'vector':
return 'point'
else:
return sock_type
# do we need to convert this socket?
def do_convert_socket(from_socket, to_socket):
if not to_socket:
return False
return (is_float_type(from_socket) and is_float3_type(to_socket)) or \
(is_float3_type(from_socket) and is_float_type(to_socket))
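# output references are emitted as "<mat>.<node>:<socket>" strings, e.g. "MyMat.PxrTexture:resultRGB"
# (illustrative names); convert_ pseudo nodes are referenced via their resultRGB/resultF outputs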
def build_output_param_str(mat_name, from_node, from_socket, convert_socket=False):
from_node_name = get_node_name(from_node, mat_name)
from_sock_name = get_socket_name(from_node, from_socket)
# replace with the convert node's output
if convert_socket:
if is_float_type(from_socket):
return "convert_%s.%s:resultRGB" % (from_node_name, from_sock_name)
else:
return "convert_%s.%s:resultF" % (from_node_name, from_sock_name)
else:
return "%s:%s" % (from_node_name, from_sock_name)
def get_output_param_str(node, mat_name, socket, to_socket=None):
if node.bl_idname == 'ShaderNodeGroup':
ng = node.node_tree
group_output = next((n for n in ng.nodes if n.bl_idname == 'NodeGroupOutput'),
None)
if group_output is None:
return "error:error"
in_sock = group_output.inputs[socket.name]
if len(in_sock.links):
link = in_sock.links[0]
return build_output_param_str(mat_name + '.' + node.name, link.from_node, link.from_socket, do_convert_socket(link.from_socket, to_socket))
else:
return "error:error"
if node.bl_idname == 'NodeGroupInput':
global current_group_node
if current_group_node is None:
return "error:error"
in_sock = current_group_node.inputs[socket.name]
if len(in_sock.links):
link = in_sock.links[0]
return build_output_param_str(mat_name, link.from_node, link.from_socket, do_convert_socket(link.from_socket, to_socket))
else:
return "error:error"
return build_output_param_str(mat_name, node, socket, do_convert_socket(socket, to_socket))
current_group_node = None
def translate_node_group(ri, group_node, mat_name):
ng = group_node.node_tree
out = next((n for n in ng.nodes if n.bl_idname == 'NodeGroupOutput'),
None)
if out is None:
return
nodes_to_export = gather_nodes(out)
global current_group_node
current_group_node = group_node
for node in nodes_to_export:
shader_node_rib(ri, node, mat_name=(mat_name + '.' + group_node.name))
current_group_node = None
def translate_cycles_node(ri, node, mat_name):
if node.bl_idname == 'ShaderNodeGroup':
translate_node_group(ri, node, mat_name)
return
if node.bl_idname not in cycles_node_map.keys():
print('No translation for node of type %s named %s' %
(node.bl_idname, node.name))
return
mapping = cycles_node_map[node.bl_idname]
params = {}
for in_name, input in node.inputs.items():
param_name = "%s %s" % (get_socket_type(
node, input), get_socket_name(node, input))
if input.is_linked:
param_name = 'reference ' + param_name
link = input.links[0]
param_val = get_output_param_str(
link.from_node, mat_name, link.from_socket, input)
else:
param_val = rib(input.default_value,
type_hint=get_socket_type(node, input))
if input.type == 'VECTOR' and param_val == [0.0, 0.0, 0.0]:
continue
params[param_name] = param_val
ramp_size = 256
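    # ramp and curve widgets are baked into fixed-size arrays of ramp_size samples for the pattern parameters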
if node.bl_idname == 'ShaderNodeValToRGB':
colors = []
alphas = []
for i in range(ramp_size):
c = node.color_ramp.evaluate(float(i) / (ramp_size - 1.0))
colors.extend(c[:3])
alphas.append(c[3])
params['color[%d] ramp_color' % ramp_size] = colors
params['float[%d] ramp_alpha' % ramp_size] = alphas
elif node.bl_idname == 'ShaderNodeVectorCurve':
colors = []
node.mapping.initialize()
r = node.mapping.curves[0]
g = node.mapping.curves[1]
b = node.mapping.curves[2]
for i in range(ramp_size):
v = float(i) / (ramp_size - 1.0)
colors.extend([r.evaluate(v), g.evaluate(v), b.evaluate(v)])
params['color[%d] ramp' % ramp_size] = colors
elif node.bl_idname == 'ShaderNodeRGBCurve':
colors = []
node.mapping.initialize()
c = node.mapping.curves[0]
r = node.mapping.curves[1]
g = node.mapping.curves[2]
b = node.mapping.curves[3]
for i in range(ramp_size):
v = float(i) / (ramp_size - 1.0)
c_val = c.evaluate(v)
colors.extend([r.evaluate(v) * c_val, g.evaluate(v)
* c_val, b.evaluate(v) * c_val])
params['color[%d] ramp' % ramp_size] = colors
ri.Pattern(mapping, get_node_name(node, mat_name), params)
def shader_node_rib(ri, node, mat_name, disp_bound=0.0, portal=False):
    if isinstance(node, tuple):
shader, from_node, from_socket = node
input_type = 'float' if shader == 'PxrToFloat3' else 'color'
node_name = 'convert_%s.%s' % (get_node_name(
from_node, mat_name), get_socket_name(from_node, from_socket))
if from_node.bl_idname == 'ShaderNodeGroup':
node_name = 'convert_' + get_output_param_str(
from_node, mat_name, from_socket).replace(':', '.')
params = {"reference %s input" % input_type: get_output_param_str(
from_node, mat_name, from_socket)}
params['__instanceid'] = node_name
ri.Pattern(shader, node_name, params)
return
elif not hasattr(node, 'renderman_node_type'):
return translate_cycles_node(ri, node, mat_name)
params = gen_params(ri, node, mat_name)
instance = mat_name + '.' + node.name
params['__instanceid'] = instance
if 'string filename' in params:
params['string filename'] = bpy.path.abspath(params['string filename'])
if node.renderman_node_type == "pattern":
if node.bl_label == 'PxrOSL':
shader = node.__annotations__['plugin_name']
if shader:
ri.Pattern(shader, instance, params)
else:
ri.Pattern(node.bl_label, instance, params)
elif node.renderman_node_type == "light":
light_group_name = ''
scene = bpy.context.scene
for lg in scene.renderman.light_groups:
if mat_name in lg.members.keys():
light_group_name = lg.name
break
params['string lightGroup'] = light_group_name
params['__instanceid'] = mat_name
light_name = node.bl_label
if light_name == 'PxrPortalLight':
if mat_name in bpy.data.lamps:
lamp = bpy.context.scene.objects.active
if lamp and lamp.parent and lamp.parent.type == 'LAMP' \
and lamp.parent.data.renderman.renderman_type == 'ENV':
from .export import property_group_to_params
parent_node = lamp.parent.data.renderman.get_light_node()
parent_params = property_group_to_params(parent_node)
params['string domeSpace'] = lamp.parent.name
params['string portalName'] = mat_name
params['string domeColorMap'] = parent_params['string lightColorMap']
params['float intensity'] = parent_params['float intensity'] * params['float intensityMult']
del params['float intensityMult']
params['float exposure'] = parent_params['float exposure']
params['color lightColor'] = [i*j for i,j in zip(parent_params['color lightColor'],params['color tint'])]
del params['color tint']
if not params['int enableTemperature']:
params['int enableTemperature'] = parent_params['int enableTemperature']
params['float temperature'] = parent_params['float temperature']
params['float specular'] *= parent_params['float specular']
params['float diffuse'] *= parent_params['float diffuse']
ri.Light(light_name, mat_name, params)
elif node.renderman_node_type == "lightfilter":
params['__instanceid'] = mat_name
light_name = node.bl_label
ri.LightFilter(light_name, mat_name, params)
elif node.renderman_node_type == "displacement":
ri.Attribute('displacementbound', {'sphere': disp_bound})
ri.Displace(node.bl_label, mat_name, params)
else:
ri.Bxdf(node.bl_label, instance, params)
def replace_frame_num(prop):
frame_num = bpy.data.scenes[0].frame_current
prop = prop.replace('$f4', str(frame_num).zfill(4))
prop = prop.replace('$F4', str(frame_num).zfill(4))
prop = prop.replace('$f3', str(frame_num).zfill(3))
prop = prop.replace('$F3', str(frame_num).zfill(3))
return prop
def get_tex_file_name(prop):
prop = replace_frame_num(prop)
prop = bpy.path.basename(prop)
part = prop.rpartition('.')
prop = part[0]
if prop != '' and part[2].lower() != 'tex':
_p_ = bpy.context.scene.renderman.path_texture_output
_s_ = "" if _p_.endswith("/") or _p_.endswith("\\") else "/"
_f_ = "{}{}{}{}".format(_p_, _s_, prop, ".tex")
return user_path(_f_)
else:
return prop
def is_same_type(socket1, socket2):
return (type(socket1) == type(socket2)) or (is_float_type(socket1) and is_float_type(socket2)) or \
(is_float3_type(socket1) and is_float3_type(socket2))
def is_float_type(socket):
if type(socket) == type({}):
return socket['renderman_type'] in ['int', 'float']
    elif hasattr(socket.node, '__annotations__') and 'plugin_name' in socket.node.__annotations__:
prop_meta = getattr(socket.node, 'output_meta', [
]) if socket.is_output else getattr(socket.node, 'prop_meta', [])
if socket.name in prop_meta:
return prop_meta[socket.name]['renderman_type'] in ['int', 'float']
else:
return socket.type in ['INT', 'VALUE']
def is_float3_type(socket):
if type(socket) == type({}):
        return socket['renderman_type'] in ['color', 'vector', 'normal']
    elif hasattr(socket.node, '__annotations__') and 'plugin_name' in socket.node.__annotations__:
prop_meta = getattr(socket.node, 'output_meta', [
]) if socket.is_output else getattr(socket.node, 'prop_meta', [])
if socket.name in prop_meta:
return prop_meta[socket.name]['renderman_type'] in ['color', 'vector', 'normal']
else:
return socket.type in ['RGBA', 'VECTOR']
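# walk upstream from 'node' and return the nodes to export in dependency order, inserting
# ('PxrToFloat3', ...) / ('PxrToFloat', ...) tuples wherever a float<->float3 conversion pattern is needed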
def gather_nodes(node):
nodes = []
for socket in node.inputs:
if socket.is_linked:
link = socket.links[0]
for sub_node in gather_nodes(socket.links[0].from_node):
if sub_node not in nodes:
nodes.append(sub_node)
if is_float_type(link.from_socket) and is_float3_type(socket):
convert_node = ('PxrToFloat3', link.from_node,
link.from_socket)
if convert_node not in nodes:
nodes.append(convert_node)
elif is_float3_type(link.from_socket) and is_float_type(socket):
convert_node = ('PxrToFloat', link.from_node, link.from_socket)
if convert_node not in nodes:
nodes.append(convert_node)
if hasattr(node, 'renderman_node_type') and node.renderman_node_type != 'output':
nodes.append(node)
elif not hasattr(node, 'renderman_node_type') and node.bl_idname not in ['ShaderNodeOutputMaterial', 'NodeGroupInput', 'NodeGroupOutput']:
nodes.append(node)
return nodes
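# exporter entry point: writes the whole shading network of a material/lamp id as RIB,
# emitting one Pattern/Bxdf/Light/Displace call per gathered node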
def export_shader_nodetree(ri, id, handle=None, disp_bound=0.0, iterate_instance=False):
if id and id.node_tree:
if is_renderman_nodetree(id):
portal = type(
id).__name__ == 'AreaLamp' and id.renderman.renderman_type == 'PORTAL'
nt = id.node_tree
if not handle:
handle = id.name
if type(id) == bpy.types.Material:
handle = get_mat_name(handle)
from . import engine
if engine.ipr and hasattr(id.renderman, 'instance_num'):
if iterate_instance:
id.renderman.instance_num += 1
if id.renderman.instance_num > 0:
handle += "_%d" % id.renderman.instance_num
out = next((n for n in nt.nodes if hasattr(n, 'renderman_node_type') and
n.renderman_node_type == 'output'),
None)
if out is None:
return
nodes_to_export = gather_nodes(out)
ri.ArchiveRecord('comment', "Shader Graph")
for node in nodes_to_export:
shader_node_rib(ri, node, mat_name=handle,
disp_bound=disp_bound, portal=portal)
elif find_node(id, 'ShaderNodeOutputMaterial'):
print("Error Material %s needs a RenderMan BXDF" % id.name)
def get_textures_for_node(node, matName=""):
textures = []
if hasattr(node, 'bl_idname'):
if node.bl_idname == "PxrPtexturePatternNode":
return textures
elif node.bl_idname == "PxrOSLPatternNode":
for input_name, input in node.inputs.items():
if hasattr(input, 'is_texture') and input.is_texture:
prop = input.default_value
out_file_name = get_tex_file_name(prop)
textures.append((replace_frame_num(prop), out_file_name,
['-smode', 'periodic', '-tmode',
'periodic']))
return textures
elif node.bl_idname == 'ShaderNodeGroup':
nt = node.node_tree
for node in nt.nodes:
textures.extend(get_textures_for_node(node, matName=""))
return textures
if hasattr(node, 'prop_meta'):
for prop_name, meta in node.prop_meta.items():
if prop_name in txmake_options.index:
pass
elif hasattr(node, prop_name):
prop = getattr(node, prop_name)
if meta['renderman_type'] == 'page':
continue
else:
if ('options' in meta and meta['options'] == 'texture') or \
(node.renderman_node_type == 'light' and
'widget' in meta and meta['widget'] == 'assetIdInput' and prop_name != 'iesProfile'):
out_file_name = get_tex_file_name(prop)
if out_file_name != prop:
if node.renderman_node_type == 'light' and \
"Dome" in node.bl_label:
# no options for now
textures.append(
(replace_frame_num(prop), out_file_name, ['-envlatl']))
else:
# Test and see if options like smode are on
# this node.
if hasattr(node, "smode"):
optionsList = []
for option in txmake_options.index:
partsOfOption = getattr(
txmake_options, option)
if partsOfOption["exportType"] == "name":
optionsList.append("-" + option)
# Float values need converting
# before they are passed to command
# line
if partsOfOption["type"] == "float":
optionsList.append(
str(getattr(node, option)))
else:
optionsList.append(
getattr(node, option))
else:
# Float values need converting
# before they are passed to command
# line
if partsOfOption["type"] == "float":
optionsList.append(
str(getattr(node, option)))
else:
optionsList.append(
"-" + getattr(node, option))
textures.append(
(replace_frame_num(prop), out_file_name, optionsList))
else:
# no options found add the bare minimum
# options for smooth export.
textures.append((replace_frame_num(prop), out_file_name,
['-smode', 'periodic',
'-tmode', 'periodic']))
return textures
def get_textures(id):
textures = []
if id is None or not id.node_tree:
return textures
nt = id.node_tree
for node in nt.nodes:
textures.extend(get_textures_for_node(node, id.name))
return textures
pattern_node_categories_map = {"texture": ["PxrFractal", "PxrBakeTexture", "PxrBakePointCloud", "PxrProjectionLayer", "PxrPtexture", "PxrTexture", "PxrVoronoise", "PxrWorley", "PxrFractalize", "PxrDirt", "PxrLayeredTexture", "PxrMultiTexture"],
"bump": ["PxrBump", "PxrNormalMap", "PxrFlakes", "aaOceanPrmanShader", 'PxrAdjustNormal'],
"color": ["PxrBlackBody", "PxrHairColor", "PxrBlend", "PxrLayeredBlend", "PxrClamp", "PxrExposure", "PxrGamma", "PxrHSL", "PxrInvert", "PxrMix", "PxrProjectionStack", "PxrRamp", "PxrRemap", "PxrThinFilm", "PxrThreshold", "PxrVary", "PxrChecker", "PxrColorCorrect"],
"manifold": ["PxrManifold2D", "PxrRandomTextureManifold", "PxrManifold3D", "PxrManifold3DN", "PxrProjector", "PxrRoundCube", "PxrBumpManifold2D", "PxrTileManifold"],
"geometry": ["PxrDot", "PxrCross", "PxrFacingRatio", "PxrTangentField"],
"script": ["PxrOSL", "PxrSeExpr"],
"utility": ["PxrAttribute", "PxrGeometricAOVs", "PxrMatteID", "PxrPrimvar", "PxrShadedSide", "PxrTee", "PxrToFloat", "PxrToFloat3", "PxrVariable"],
"displace": ["PxrDispScalarLayer", 'PxrDispTransform', 'PxrDispVectorLayer'],
"layer": ['PxrLayer', 'PxrLayerMixer']}
# Node Categorization List
def GetPatternCategory(name):
for cat_name, node_names in pattern_node_categories_map.items():
if name in node_names:
return cat_name
else:
return 'deprecated'
# our own base class with an appropriate poll function,
# so the categories only show up in our own tree type
class RendermanPatternNodeCategory(NodeCategory):
@classmethod
def poll(cls, context):
return context.space_data.tree_type == 'ShaderNodeTree'
classes = [
RendermanShaderSocket,
RendermanNodeSocketColor,
RendermanNodeSocketFloat,
RendermanNodeSocketInt,
RendermanNodeSocketString,
RendermanNodeSocketVector,
RendermanNodeSocketStruct,
]
nodetypes = {}
pattern_categories = {}
def register():
for cls in classes:
bpy.utils.register_class(cls)
user_preferences = bpy.context.preferences
prefs = user_preferences.addons[__package__].preferences
categories = {}
for name, arg_file in args_files_in_path(prefs, None).items():
try:
vals = generate_node_type(prefs, name, ET.parse(arg_file).getroot())
if vals:
typename, nodetype = vals
nodetypes[typename] = nodetype
except Exception:
print("Error parsing " + name)
traceback.print_exc()
node_cats = {
'bxdf': ('RenderMan Bxdfs', []),
'light': ('RenderMan Lights', []),
'patterns_texture': ('RenderMan Texture Patterns', []),
'patterns_bump': ('RenderMan Bump Patterns', []),
'patterns_color': ('RenderMan Color Patterns', []),
'patterns_manifold': ('RenderMan Manifold Patterns', []),
'patterns_geometry': ('RenderMan Geometry Patterns', []),
'patterns_utility': ('RenderMan Utility Patterns', []),
'patterns_script': ('RenderMan Script Patterns', []),
'patterns_displace': ('RenderMan Displacement Patterns', []),
'patterns_layer': ('RenderMan Layers', []),
'displacement': ('RenderMan Displacements', [])
}
for name, node_type in nodetypes.items():
node_item = NodeItem(name, label=node_type.bl_label)
if node_type.renderman_node_type == 'pattern':
# insert pxr layer in bxdf
pattern_cat = GetPatternCategory(node_type.bl_label)
if pattern_cat == 'deprecated':
continue
node_cat = 'patterns_' + pattern_cat
node_cats[node_cat][1].append(node_item)
pattern_cat = pattern_cat.capitalize()
if pattern_cat not in pattern_categories:
pattern_categories[pattern_cat] = {}
pattern_categories[pattern_cat][name] = node_type
elif 'LM' in name and node_type.renderman_node_type == 'bxdf':
# skip LM materials
continue
elif node_type.renderman_node_type == 'light' and 'PxrMeshLight' not in name:
# skip light nodes
continue
else:
node_cats[node_type.renderman_node_type][1].append(node_item)
# all categories in a list
node_categories = [
# identifier, label, items list
RendermanPatternNodeCategory("PRMan_output_nodes", "RenderMan Outputs",
items=[NodeItem('RendermanOutputNode', label=RendermanOutputNode.bl_label)]),
]
for name, (desc, items) in node_cats.items():
node_categories.append(RendermanPatternNodeCategory(name, desc,
items=sorted(items,
key=attrgetter('_label'))))
nodeitems_utils.register_node_categories("RENDERMANSHADERNODES",
node_categories)
def unregister():
nodeitems_utils.unregister_node_categories("RENDERMANSHADERNODES")
# bpy.utils.unregister_module(__name__)
for cls in classes:
bpy.utils.unregister_class(cls)
| true | true |
f71f139049f87b9c55751a72e126f4f93a152ebd | 18,118 | py | Python | nemo/collections/asr/models/label_models.py | dynasty-com/NeMo | 1ac828df423fbcec1b34c650b3a20266bb133dde | ["Apache-2.0"] | null | null | null | nemo/collections/asr/models/label_models.py | dynasty-com/NeMo | 1ac828df423fbcec1b34c650b3a20266bb133dde | ["Apache-2.0"] | null | null | null | nemo/collections/asr/models/label_models.py | dynasty-com/NeMo | 1ac828df423fbcec1b34c650b3a20266bb133dde | ["Apache-2.0"] | null | null | null |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import pickle as pkl
from typing import Dict, List, Optional, Union
import onnx
import torch
from omegaconf import DictConfig
from omegaconf.omegaconf import open_dict
from pytorch_lightning import Trainer
from nemo.collections.asr.data.audio_to_label import AudioToSpeechLabelDataSet
from nemo.collections.asr.losses.angularloss import AngularSoftmaxLoss
from nemo.collections.asr.parts.features import WaveformFeaturizer
from nemo.collections.asr.parts.perturb import process_augmentations
from nemo.collections.common.losses import CrossEntropyLoss as CELoss
from nemo.collections.common.metrics import TopKClassificationAccuracy
from nemo.core.classes import ModelPT
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.classes.exportable import Exportable
from nemo.core.neural_types import *
from nemo.utils import logging
from nemo.utils.export_utils import attach_onnx_to_onnx
__all__ = ['EncDecSpeakerLabelModel', 'ExtractSpeakerEmbeddingsModel']
class EncDecSpeakerLabelModel(ModelPT, Exportable):
"""Encoder decoder class for speaker label models.
Model class creates training, validation methods for setting up data
performing model forward pass.
Expects config dict for
* preprocessor
* Jasper/Quartznet Encoder
* Speaker Decoder
"""
@classmethod
def list_available_models(cls) -> List[PretrainedModelInfo]:
"""
This method returns a list of pre-trained model which can be instantiated directly from NVIDIA's NGC cloud.
Returns:
List of available pre-trained models.
"""
result = []
model = PretrainedModelInfo(
pretrained_model_name="SpeakerNet_recognition",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/SpeakerNet_recognition.nemo",
description="SpeakerNet_recognition model trained end-to-end for speaker recognition purposes with cross_entropy loss. It was trained on voxceleb 1, voxceleb 2 dev datasets and augmented with musan music and noise. Speaker Recognition model achieves 2.65% EER on voxceleb-O cleaned trial file",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="SpeakerNet_verification",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/SpeakerNet_verification.nemo",
description="SpeakerNet_verification model trained end-to-end for speaker verification purposes with arcface angular softmax loss. It was trained on voxceleb 1, voxceleb 2 dev datasets and augmented with musan music and noise. Speaker Verification model achieves 2.12% EER on voxceleb-O cleaned trial file",
)
result.append(model)
return result
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
super().__init__(cfg=cfg, trainer=trainer)
self.preprocessor = EncDecSpeakerLabelModel.from_config_dict(cfg.preprocessor)
self.encoder = EncDecSpeakerLabelModel.from_config_dict(cfg.encoder)
self.decoder = EncDecSpeakerLabelModel.from_config_dict(cfg.decoder)
if 'angular' in cfg.decoder and cfg.decoder['angular']:
logging.info("Training with Angular Softmax Loss")
scale = cfg.loss.scale
margin = cfg.loss.margin
self.loss = AngularSoftmaxLoss(scale=scale, margin=margin)
else:
logging.info("Training with Softmax-CrossEntropy loss")
self.loss = CELoss()
self._accuracy = TopKClassificationAccuracy(top_k=[1], dist_sync_on_step=True)
def __setup_dataloader_from_config(self, config: Optional[Dict]):
if 'augmentor' in config:
augmentor = process_augmentations(config['augmentor'])
else:
augmentor = None
featurizer = WaveformFeaturizer(
sample_rate=config['sample_rate'], int_values=config.get('int_values', False), augmentor=augmentor
)
self.dataset = AudioToSpeechLabelDataSet(
manifest_filepath=config['manifest_filepath'],
labels=config['labels'],
featurizer=featurizer,
max_duration=config.get('max_duration', None),
min_duration=config.get('min_duration', None),
trim=config.get('trim_silence', True),
load_audio=config.get('load_audio', True),
time_length=config.get('time_length', 8),
)
return torch.utils.data.DataLoader(
dataset=self.dataset,
batch_size=config['batch_size'],
collate_fn=self.dataset.fixed_seq_collate_fn,
drop_last=config.get('drop_last', False),
shuffle=config['shuffle'],
num_workers=config.get('num_workers', 2),
pin_memory=config.get('pin_memory', False),
)
def setup_training_data(self, train_data_layer_config: Optional[Union[DictConfig, Dict]]):
if 'shuffle' not in train_data_layer_config:
train_data_layer_config['shuffle'] = True
self._train_dl = self.__setup_dataloader_from_config(config=train_data_layer_config)
def setup_validation_data(self, val_data_layer_config: Optional[Union[DictConfig, Dict]]):
if 'shuffle' not in val_data_layer_config:
val_data_layer_config['shuffle'] = False
val_data_layer_config['labels'] = self.dataset.labels
self._validation_dl = self.__setup_dataloader_from_config(config=val_data_layer_config)
def setup_test_data(self, test_data_layer_params: Optional[Union[DictConfig, Dict]]):
if 'shuffle' not in test_data_layer_params:
test_data_layer_params['shuffle'] = False
if hasattr(self, 'dataset'):
test_data_layer_params['labels'] = self.dataset.labels
self.embedding_dir = test_data_layer_params.get('embedding_dir', './')
self.test_manifest = test_data_layer_params.get('manifest_filepath', None)
self._test_dl = self.__setup_dataloader_from_config(config=test_data_layer_params)
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
if hasattr(self.preprocessor, '_sample_rate'):
audio_eltype = AudioSignal(freq=self.preprocessor._sample_rate)
else:
audio_eltype = AudioSignal()
return {
"input_signal": NeuralType(('B', 'T'), audio_eltype),
"input_signal_length": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"logits": NeuralType(('B', 'D'), LogitsType()),
"embs": NeuralType(('B', 'D'), AcousticEncodedRepresentation()),
}
@typecheck()
def forward(self, input_signal, input_signal_length):
processed_signal, processed_signal_len = self.preprocessor(
input_signal=input_signal, length=input_signal_length,
)
encoded, _ = self.encoder(audio_signal=processed_signal, length=processed_signal_len)
logits, embs = self.decoder(encoder_output=encoded)
return logits, embs
# PTL-specific methods
def training_step(self, batch, batch_idx):
audio_signal, audio_signal_len, labels, _ = batch
logits, _ = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
loss = self.loss(logits=logits, labels=labels)
self.log('loss', loss)
self.log('learning_rate', self._optimizer.param_groups[0]['lr'])
self._accuracy(logits=logits, labels=labels)
top_k = self._accuracy.compute()
for i, top_i in enumerate(top_k):
self.log(f'training_batch_accuracy_top@{i}', top_i)
return {'loss': loss}
def validation_step(self, batch, batch_idx, dataloader_idx: int = 0):
audio_signal, audio_signal_len, labels, _ = batch
logits, _ = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
loss_value = self.loss(logits=logits, labels=labels)
acc_top_k = self._accuracy(logits=logits, labels=labels)
correct_counts, total_counts = self._accuracy.correct_counts_k, self._accuracy.total_counts_k
return {
'val_loss': loss_value,
'val_correct_counts': correct_counts,
'val_total_counts': total_counts,
'val_acc_top_k': acc_top_k,
}
def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0):
val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean()
correct_counts = torch.stack([x['val_correct_counts'] for x in outputs]).sum(axis=0)
total_counts = torch.stack([x['val_total_counts'] for x in outputs]).sum(axis=0)
self._accuracy.correct_counts_k = correct_counts
self._accuracy.total_counts_k = total_counts
topk_scores = self._accuracy.compute()
logging.info("val_loss: {:.3f}".format(val_loss_mean))
self.log('val_loss', val_loss_mean)
for top_k, score in zip(self._accuracy.top_k, topk_scores):
self.log('val_epoch_accuracy_top@{}'.format(top_k), score)
return {
'val_loss': val_loss_mean,
'val_acc_top_k': topk_scores,
}
def test_step(self, batch, batch_idx, dataloader_idx: int = 0):
audio_signal, audio_signal_len, labels, _ = batch
logits, _ = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
loss_value = self.loss(logits=logits, labels=labels)
acc_top_k = self._accuracy(logits=logits, labels=labels)
correct_counts, total_counts = self._accuracy.correct_counts_k, self._accuracy.total_counts_k
return {
'test_loss': loss_value,
'test_correct_counts': correct_counts,
'test_total_counts': total_counts,
'test_acc_top_k': acc_top_k,
}
def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0):
test_loss_mean = torch.stack([x['test_loss'] for x in outputs]).mean()
correct_counts = torch.stack([x['test_correct_counts'] for x in outputs]).sum(axis=0)
total_counts = torch.stack([x['test_total_counts'] for x in outputs]).sum(axis=0)
self._accuracy.correct_counts_k = correct_counts
self._accuracy.total_counts_k = total_counts
topk_scores = self._accuracy.compute()
logging.info("test_loss: {:.3f}".format(test_loss_mean))
self.log('test_loss', test_loss_mean)
for top_k, score in zip(self._accuracy.top_k, topk_scores):
self.log('test_epoch_accuracy_top@{}'.format(top_k), score)
return {
'test_loss': test_loss_mean,
'test_acc_top_k': topk_scores,
}
def setup_finetune_model(self, model_config: DictConfig):
"""
setup_finetune_model method sets up training data, validation data and test data with new
provided config, this checks for the previous labels set up during training from scratch, if None,
it sets up labels for provided finetune data from manifest files
Args:
model_config: cfg which has train_ds, optional validation_ds, optional test_ds and
mandatory encoder and decoder model params
make sure you set num_classes correctly for finetune data
Returns: None
"""
if hasattr(self, 'dataset'):
scratch_labels = self.dataset.labels
else:
scratch_labels = None
logging.info("Setting up data loaders with manifests provided from model_config")
if 'train_ds' in model_config and model_config.train_ds is not None:
self.setup_training_data(model_config.train_ds)
else:
raise KeyError("train_ds is not found in model_config but you need it for fine tuning")
if self.dataset.labels is None or len(self.dataset.labels) == 0:
raise ValueError(f'New labels must be non-empty list of labels. But I got: {self.dataset.labels}')
if 'validation_ds' in model_config and model_config.validation_ds is not None:
self.setup_multiple_validation_data(model_config.validation_ds)
if 'test_ds' in model_config and model_config.test_ds is not None:
self.setup_multiple_test_data(model_config.test_ds)
if scratch_labels == self.dataset.labels: # checking for new finetune dataset labels
logging.warning(
"Trained dataset labels are same as finetune dataset labels -- continuing change of decoder parameters"
)
elif scratch_labels is None:
logging.warning(
"Either you provided a dummy manifest file during training from scratch or you restored from a pretrained nemo file"
)
decoder_config = model_config.decoder
new_decoder_config = copy.deepcopy(decoder_config)
if new_decoder_config['num_classes'] != len(self.dataset.labels):
raise ValueError(
"number of classes provided {} is not same as number of different labels in finetuning data: {}".format(
new_decoder_config['num_classes'], len(self.dataset.labels)
)
)
del self.decoder
self.decoder = EncDecSpeakerLabelModel.from_config_dict(new_decoder_config)
with open_dict(self._cfg.decoder):
self._cfg.decoder = new_decoder_config
logging.info(f"Changed decoder output to # {self.decoder._num_classes} classes.")
def export(
self,
output: str,
input_example=None,
output_example=None,
verbose=False,
export_params=True,
do_constant_folding=True,
keep_initializers_as_inputs=False,
onnx_opset_version: int = 12,
try_script: bool = False,
set_eval: bool = True,
check_trace: bool = True,
use_dynamic_axes: bool = True,
):
if input_example is not None or output_example is not None:
logging.warning(
"Passed input and output examples will be ignored and recomputed since"
" EncDecSpeakerModel consists of two separate models (encoder and decoder) with different"
" inputs and outputs."
)
encoder_onnx = self.encoder.export(
os.path.join(os.path.dirname(output), 'encoder_' + os.path.basename(output)),
None, # computed by input_example()
None,
verbose,
export_params,
do_constant_folding,
keep_initializers_as_inputs,
onnx_opset_version,
try_script,
set_eval,
check_trace,
use_dynamic_axes,
)
decoder_onnx = self.decoder.export(
os.path.join(os.path.dirname(output), 'decoder_' + os.path.basename(output)),
None, # computed by input_example()
None,
verbose,
export_params,
do_constant_folding,
keep_initializers_as_inputs,
onnx_opset_version,
try_script,
set_eval,
check_trace,
use_dynamic_axes,
)
output_model = attach_onnx_to_onnx(encoder_onnx, decoder_onnx, "SL")
onnx.save(output_model, output)
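# Hedged usage sketch (assumed file names, illustrative only): because the encoder and decoder
# are exported separately and then stitched together with attach_onnx_to_onnx, a call such as
#
#   model.export("speakernet.onnx")
#
# is expected to write "encoder_speakernet.onnx" and "decoder_speakernet.onnx" next to the
# requested output path and save the combined graph as "speakernet.onnx".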
class ExtractSpeakerEmbeddingsModel(EncDecSpeakerLabelModel):
"""
This Model class facilitates extraction of speaker embeddings from a pretrained model.
    The resulting embeddings file is saved in the embedding directory (self.embedding_dir) passed through the config.
"""
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
super().__init__(cfg=cfg, trainer=trainer)
def test_step(self, batch, batch_ix):
audio_signal, audio_signal_len, labels, slices = batch
_, embs = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
return {'embs': embs, 'labels': labels, 'slices': slices}
def test_epoch_end(self, outputs):
embs = torch.cat([x['embs'] for x in outputs])
slices = torch.cat([x['slices'] for x in outputs])
emb_shape = embs.shape[-1]
embs = embs.view(-1, emb_shape).cpu().numpy()
out_embeddings = {}
start_idx = 0
with open(self.test_manifest, 'r') as manifest:
for idx, line in enumerate(manifest.readlines()):
line = line.strip()
dic = json.loads(line)
structure = dic['audio_filepath'].split('/')[-3:]
uniq_name = '@'.join(structure)
if uniq_name in out_embeddings:
raise KeyError("Embeddings for label {} already present in emb dictionary".format(uniq_name))
num_slices = slices[idx]
end_idx = start_idx + num_slices
out_embeddings[uniq_name] = embs[start_idx:end_idx].mean(axis=0)
start_idx = end_idx
embedding_dir = os.path.join(self.embedding_dir, 'embeddings')
if not os.path.exists(embedding_dir):
os.mkdir(embedding_dir)
prefix = self.test_manifest.split('/')[-1].split('.')[-2]
name = os.path.join(embedding_dir, prefix)
pkl.dump(out_embeddings, open(name + '_embeddings.pkl', 'wb'))
logging.info("Saved embedding files to {}".format(embedding_dir))
return {}
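if __name__ == '__main__':
    # Minimal inspection sketch (the default path below is an assumption): load an embeddings
    # pickle written by ExtractSpeakerEmbeddingsModel.test_epoch_end and print a few entries.
    import sys
    emb_file = sys.argv[1] if len(sys.argv) > 1 else 'embeddings/test_embeddings.pkl'
    with open(emb_file, 'rb') as fh:
        saved_embeddings = pkl.load(fh)
    for uniq_name, emb in list(saved_embeddings.items())[:5]:
        logging.info("{} -> embedding of shape {}".format(uniq_name, emb.shape))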
| 43.552885 | 319 | 0.669941 |
import copy
import json
import os
import pickle as pkl
from typing import Dict, List, Optional, Union
import onnx
import torch
from omegaconf import DictConfig
from omegaconf.omegaconf import open_dict
from pytorch_lightning import Trainer
from nemo.collections.asr.data.audio_to_label import AudioToSpeechLabelDataSet
from nemo.collections.asr.losses.angularloss import AngularSoftmaxLoss
from nemo.collections.asr.parts.features import WaveformFeaturizer
from nemo.collections.asr.parts.perturb import process_augmentations
from nemo.collections.common.losses import CrossEntropyLoss as CELoss
from nemo.collections.common.metrics import TopKClassificationAccuracy
from nemo.core.classes import ModelPT
from nemo.core.classes.common import PretrainedModelInfo, typecheck
from nemo.core.classes.exportable import Exportable
from nemo.core.neural_types import *
from nemo.utils import logging
from nemo.utils.export_utils import attach_onnx_to_onnx
__all__ = ['EncDecSpeakerLabelModel', 'ExtractSpeakerEmbeddingsModel']
class EncDecSpeakerLabelModel(ModelPT, Exportable):
@classmethod
def list_available_models(cls) -> List[PretrainedModelInfo]:
result = []
model = PretrainedModelInfo(
pretrained_model_name="SpeakerNet_recognition",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/SpeakerNet_recognition.nemo",
description="SpeakerNet_recognition model trained end-to-end for speaker recognition purposes with cross_entropy loss. It was trained on voxceleb 1, voxceleb 2 dev datasets and augmented with musan music and noise. Speaker Recognition model achieves 2.65% EER on voxceleb-O cleaned trial file",
)
result.append(model)
model = PretrainedModelInfo(
pretrained_model_name="SpeakerNet_verification",
location="https://api.ngc.nvidia.com/v2/models/nvidia/nemospeechmodels/versions/1.0.0a5/files/SpeakerNet_verification.nemo",
description="SpeakerNet_verification model trained end-to-end for speaker verification purposes with arcface angular softmax loss. It was trained on voxceleb 1, voxceleb 2 dev datasets and augmented with musan music and noise. Speaker Verification model achieves 2.12% EER on voxceleb-O cleaned trial file",
)
result.append(model)
return result
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
super().__init__(cfg=cfg, trainer=trainer)
self.preprocessor = EncDecSpeakerLabelModel.from_config_dict(cfg.preprocessor)
self.encoder = EncDecSpeakerLabelModel.from_config_dict(cfg.encoder)
self.decoder = EncDecSpeakerLabelModel.from_config_dict(cfg.decoder)
if 'angular' in cfg.decoder and cfg.decoder['angular']:
logging.info("Training with Angular Softmax Loss")
scale = cfg.loss.scale
margin = cfg.loss.margin
self.loss = AngularSoftmaxLoss(scale=scale, margin=margin)
else:
logging.info("Training with Softmax-CrossEntropy loss")
self.loss = CELoss()
self._accuracy = TopKClassificationAccuracy(top_k=[1], dist_sync_on_step=True)
def __setup_dataloader_from_config(self, config: Optional[Dict]):
if 'augmentor' in config:
augmentor = process_augmentations(config['augmentor'])
else:
augmentor = None
featurizer = WaveformFeaturizer(
sample_rate=config['sample_rate'], int_values=config.get('int_values', False), augmentor=augmentor
)
self.dataset = AudioToSpeechLabelDataSet(
manifest_filepath=config['manifest_filepath'],
labels=config['labels'],
featurizer=featurizer,
max_duration=config.get('max_duration', None),
min_duration=config.get('min_duration', None),
trim=config.get('trim_silence', True),
load_audio=config.get('load_audio', True),
time_length=config.get('time_length', 8),
)
return torch.utils.data.DataLoader(
dataset=self.dataset,
batch_size=config['batch_size'],
collate_fn=self.dataset.fixed_seq_collate_fn,
drop_last=config.get('drop_last', False),
shuffle=config['shuffle'],
num_workers=config.get('num_workers', 2),
pin_memory=config.get('pin_memory', False),
)
def setup_training_data(self, train_data_layer_config: Optional[Union[DictConfig, Dict]]):
if 'shuffle' not in train_data_layer_config:
train_data_layer_config['shuffle'] = True
self._train_dl = self.__setup_dataloader_from_config(config=train_data_layer_config)
def setup_validation_data(self, val_data_layer_config: Optional[Union[DictConfig, Dict]]):
if 'shuffle' not in val_data_layer_config:
val_data_layer_config['shuffle'] = False
val_data_layer_config['labels'] = self.dataset.labels
self._validation_dl = self.__setup_dataloader_from_config(config=val_data_layer_config)
def setup_test_data(self, test_data_layer_params: Optional[Union[DictConfig, Dict]]):
if 'shuffle' not in test_data_layer_params:
test_data_layer_params['shuffle'] = False
if hasattr(self, 'dataset'):
test_data_layer_params['labels'] = self.dataset.labels
self.embedding_dir = test_data_layer_params.get('embedding_dir', './')
self.test_manifest = test_data_layer_params.get('manifest_filepath', None)
self._test_dl = self.__setup_dataloader_from_config(config=test_data_layer_params)
@property
def input_types(self) -> Optional[Dict[str, NeuralType]]:
if hasattr(self.preprocessor, '_sample_rate'):
audio_eltype = AudioSignal(freq=self.preprocessor._sample_rate)
else:
audio_eltype = AudioSignal()
return {
"input_signal": NeuralType(('B', 'T'), audio_eltype),
"input_signal_length": NeuralType(tuple('B'), LengthsType()),
}
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {
"logits": NeuralType(('B', 'D'), LogitsType()),
"embs": NeuralType(('B', 'D'), AcousticEncodedRepresentation()),
}
@typecheck()
def forward(self, input_signal, input_signal_length):
processed_signal, processed_signal_len = self.preprocessor(
input_signal=input_signal, length=input_signal_length,
)
encoded, _ = self.encoder(audio_signal=processed_signal, length=processed_signal_len)
logits, embs = self.decoder(encoder_output=encoded)
return logits, embs
def training_step(self, batch, batch_idx):
audio_signal, audio_signal_len, labels, _ = batch
logits, _ = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
loss = self.loss(logits=logits, labels=labels)
self.log('loss', loss)
self.log('learning_rate', self._optimizer.param_groups[0]['lr'])
self._accuracy(logits=logits, labels=labels)
top_k = self._accuracy.compute()
for i, top_i in enumerate(top_k):
self.log(f'training_batch_accuracy_top@{i}', top_i)
return {'loss': loss}
def validation_step(self, batch, batch_idx, dataloader_idx: int = 0):
audio_signal, audio_signal_len, labels, _ = batch
logits, _ = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
loss_value = self.loss(logits=logits, labels=labels)
acc_top_k = self._accuracy(logits=logits, labels=labels)
correct_counts, total_counts = self._accuracy.correct_counts_k, self._accuracy.total_counts_k
return {
'val_loss': loss_value,
'val_correct_counts': correct_counts,
'val_total_counts': total_counts,
'val_acc_top_k': acc_top_k,
}
def multi_validation_epoch_end(self, outputs, dataloader_idx: int = 0):
val_loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean()
correct_counts = torch.stack([x['val_correct_counts'] for x in outputs]).sum(axis=0)
total_counts = torch.stack([x['val_total_counts'] for x in outputs]).sum(axis=0)
self._accuracy.correct_counts_k = correct_counts
self._accuracy.total_counts_k = total_counts
topk_scores = self._accuracy.compute()
logging.info("val_loss: {:.3f}".format(val_loss_mean))
self.log('val_loss', val_loss_mean)
for top_k, score in zip(self._accuracy.top_k, topk_scores):
self.log('val_epoch_accuracy_top@{}'.format(top_k), score)
return {
'val_loss': val_loss_mean,
'val_acc_top_k': topk_scores,
}
def test_step(self, batch, batch_idx, dataloader_idx: int = 0):
audio_signal, audio_signal_len, labels, _ = batch
logits, _ = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
loss_value = self.loss(logits=logits, labels=labels)
acc_top_k = self._accuracy(logits=logits, labels=labels)
correct_counts, total_counts = self._accuracy.correct_counts_k, self._accuracy.total_counts_k
return {
'test_loss': loss_value,
'test_correct_counts': correct_counts,
'test_total_counts': total_counts,
'test_acc_top_k': acc_top_k,
}
def multi_test_epoch_end(self, outputs, dataloader_idx: int = 0):
test_loss_mean = torch.stack([x['test_loss'] for x in outputs]).mean()
correct_counts = torch.stack([x['test_correct_counts'] for x in outputs]).sum(axis=0)
total_counts = torch.stack([x['test_total_counts'] for x in outputs]).sum(axis=0)
self._accuracy.correct_counts_k = correct_counts
self._accuracy.total_counts_k = total_counts
topk_scores = self._accuracy.compute()
logging.info("test_loss: {:.3f}".format(test_loss_mean))
self.log('test_loss', test_loss_mean)
for top_k, score in zip(self._accuracy.top_k, topk_scores):
self.log('test_epoch_accuracy_top@{}'.format(top_k), score)
return {
'test_loss': test_loss_mean,
'test_acc_top_k': topk_scores,
}
def setup_finetune_model(self, model_config: DictConfig):
if hasattr(self, 'dataset'):
scratch_labels = self.dataset.labels
else:
scratch_labels = None
logging.info("Setting up data loaders with manifests provided from model_config")
if 'train_ds' in model_config and model_config.train_ds is not None:
self.setup_training_data(model_config.train_ds)
else:
raise KeyError("train_ds is not found in model_config but you need it for fine tuning")
if self.dataset.labels is None or len(self.dataset.labels) == 0:
raise ValueError(f'New labels must be non-empty list of labels. But I got: {self.dataset.labels}')
if 'validation_ds' in model_config and model_config.validation_ds is not None:
self.setup_multiple_validation_data(model_config.validation_ds)
if 'test_ds' in model_config and model_config.test_ds is not None:
self.setup_multiple_test_data(model_config.test_ds)
if scratch_labels == self.dataset.labels:
logging.warning(
"Trained dataset labels are same as finetune dataset labels -- continuing change of decoder parameters"
)
elif scratch_labels is None:
logging.warning(
"Either you provided a dummy manifest file during training from scratch or you restored from a pretrained nemo file"
)
decoder_config = model_config.decoder
new_decoder_config = copy.deepcopy(decoder_config)
if new_decoder_config['num_classes'] != len(self.dataset.labels):
raise ValueError(
"number of classes provided {} is not same as number of different labels in finetuning data: {}".format(
new_decoder_config['num_classes'], len(self.dataset.labels)
)
)
del self.decoder
self.decoder = EncDecSpeakerLabelModel.from_config_dict(new_decoder_config)
with open_dict(self._cfg.decoder):
self._cfg.decoder = new_decoder_config
logging.info(f"Changed decoder output to # {self.decoder._num_classes} classes.")
def export(
self,
output: str,
input_example=None,
output_example=None,
verbose=False,
export_params=True,
do_constant_folding=True,
keep_initializers_as_inputs=False,
onnx_opset_version: int = 12,
try_script: bool = False,
set_eval: bool = True,
check_trace: bool = True,
use_dynamic_axes: bool = True,
):
if input_example is not None or output_example is not None:
logging.warning(
"Passed input and output examples will be ignored and recomputed since"
" EncDecSpeakerModel consists of two separate models (encoder and decoder) with different"
" inputs and outputs."
)
encoder_onnx = self.encoder.export(
os.path.join(os.path.dirname(output), 'encoder_' + os.path.basename(output)),
None,
None,
verbose,
export_params,
do_constant_folding,
keep_initializers_as_inputs,
onnx_opset_version,
try_script,
set_eval,
check_trace,
use_dynamic_axes,
)
decoder_onnx = self.decoder.export(
os.path.join(os.path.dirname(output), 'decoder_' + os.path.basename(output)),
None,
None,
verbose,
export_params,
do_constant_folding,
keep_initializers_as_inputs,
onnx_opset_version,
try_script,
set_eval,
check_trace,
use_dynamic_axes,
)
output_model = attach_onnx_to_onnx(encoder_onnx, decoder_onnx, "SL")
onnx.save(output_model, output)
class ExtractSpeakerEmbeddingsModel(EncDecSpeakerLabelModel):
def __init__(self, cfg: DictConfig, trainer: Trainer = None):
super().__init__(cfg=cfg, trainer=trainer)
def test_step(self, batch, batch_ix):
audio_signal, audio_signal_len, labels, slices = batch
_, embs = self.forward(input_signal=audio_signal, input_signal_length=audio_signal_len)
return {'embs': embs, 'labels': labels, 'slices': slices}
def test_epoch_end(self, outputs):
embs = torch.cat([x['embs'] for x in outputs])
slices = torch.cat([x['slices'] for x in outputs])
emb_shape = embs.shape[-1]
embs = embs.view(-1, emb_shape).cpu().numpy()
out_embeddings = {}
start_idx = 0
with open(self.test_manifest, 'r') as manifest:
for idx, line in enumerate(manifest.readlines()):
line = line.strip()
dic = json.loads(line)
structure = dic['audio_filepath'].split('/')[-3:]
uniq_name = '@'.join(structure)
if uniq_name in out_embeddings:
raise KeyError("Embeddings for label {} already present in emb dictionary".format(uniq_name))
num_slices = slices[idx]
end_idx = start_idx + num_slices
out_embeddings[uniq_name] = embs[start_idx:end_idx].mean(axis=0)
start_idx = end_idx
embedding_dir = os.path.join(self.embedding_dir, 'embeddings')
if not os.path.exists(embedding_dir):
os.mkdir(embedding_dir)
prefix = self.test_manifest.split('/')[-1].split('.')[-2]
name = os.path.join(embedding_dir, prefix)
pkl.dump(out_embeddings, open(name + '_embeddings.pkl', 'wb'))
logging.info("Saved embedding files to {}".format(embedding_dir))
return {}
| true | true |
f71f140f350264a0dce7591a5dbac3e9ba360b5f | 477 | py | Python | output/models/ms_data/regex/re_f7_xsd/re_f7.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/ms_data/regex/re_f7_xsd/re_f7.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/ms_data/regex/re_f7_xsd/re_f7.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from typing import List, Optional
@dataclass
class Regex:
att: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"pattern": r"[^\s]{3}",
}
)
@dataclass
class Doc:
class Meta:
name = "doc"
elem: List[Regex] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
}
)
| 17.035714 | 40 | 0.505241 | from dataclasses import dataclass, field
from typing import List, Optional
@dataclass
class Regex:
att: Optional[str] = field(
default=None,
metadata={
"type": "Attribute",
"pattern": r"[^\s]{3}",
}
)
@dataclass
class Doc:
class Meta:
name = "doc"
elem: List[Regex] = field(
default_factory=list,
metadata={
"type": "Element",
"namespace": "",
}
)
| true | true |
f71f145a83c52f0a8a2143d25a2e21652b51782c | 5,116 | py | Python | caflow/models/modules/mri_to_pet/UnconditionalFlow.py | GBATZOLIS/CAFLOW | ea33f84c424bd8e46999be59cd5d52bd8f0a3a77 | [
"MIT"
] | 6 | 2021-06-01T15:29:20.000Z | 2022-03-01T03:58:43.000Z | caflow/models/modules/mri_to_pet/UnconditionalFlow.py | GBATZOLIS/CAFLOW | ea33f84c424bd8e46999be59cd5d52bd8f0a3a77 | [
"MIT"
] | null | null | null | caflow/models/modules/mri_to_pet/UnconditionalFlow.py | GBATZOLIS/CAFLOW | ea33f84c424bd8e46999be59cd5d52bd8f0a3a77 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 19 22:02:53 2021
@author: gbatz97
"""
import torch.nn as nn
import torch
from caflow.models.modules.blocks.FlowBlock import FlowBlock
from caflow.models.modules.blocks.Dequantisation import Dequantisation, VariationalDequantization
class UnconditionalFlow(nn.Module):
def __init__(self, channels, dim, resolution, scales, scale_depth, quants, vardeq_depth, coupling_type, nn_settings):
super(UnconditionalFlow, self).__init__()
self.channels = channels
self.dim = dim
self.resolution = resolution
self.scales = scales
self.scale_blocks = nn.ModuleList()
if vardeq_depth is None:
self.scale_blocks.append(Dequantisation(dim=dim, quants=quants))
else:
self.scale_blocks.append(VariationalDequantization(channels=channels, depth=vardeq_depth, dim=dim, \
resolution=self.calculate_resolution(dim, 0),\
quants=quants, coupling_type=coupling_type, nn_settings=nn_settings))
for scale in range(self.scales):
scale_channels = self.calculate_scale_channels(dim, scale)
resolution = self.calculate_resolution(dim, scale)
self.scale_blocks.append(FlowBlock(channels = scale_channels, dim = dim,
resolution=resolution, depth = scale_depth,
coupling_type=coupling_type, nn_settings=nn_settings))
# Create prior distribution for final latent space
self.prior = torch.distributions.normal.Normal(loc=0.0, scale=1.0)
def calculate_resolution(self, dim, scale):
if isinstance(self.resolution, int):
resolution = tuple([self.resolution//2**scale for _ in range(self.dim)])
else:
resolution = tuple([x//2**scale for x in self.resolution])
return resolution
def calculate_scale_channels(self, dim, scale):
if scale==0:
return 2 ** (dim * scale) * self.channels
else:
return 2 ** ((dim-1) * scale) * self.channels
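    # Worked example (illustrative comment only): with channels=1, dim=3 and resolution=64, the
    # per-scale channel counts returned above are 1 at scale 0 and 2**(2*scale) afterwards
    # (4, 16, 64 for scales 1-3), while calculate_resolution halves each spatial side per scale:
    # (64, 64, 64) -> (32, 32, 32) -> (16, 16, 16) -> (8, 8, 8).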
def forward(self, y=None, z=[], logprior=0., logdet=0., reverse=False):
if reverse:
#assert z is not None
y_dec, logdet = self.decode(z, logdet=logdet)
return y_dec, logdet
else:
#assert y is not None
z_enc, logprior, logdet = self.encode(y, logprior=logprior, logdet=logdet)
return z_enc, logprior, logdet
def encode(self, y, logprior, logdet):
#y is the HR image/scan that we want to encode in the latent space
#z_enc: list of the encoded latent tensors in ascending scale order
#order: from scale 1 (dim=orig_dim/2) --- to --- scale n (dim=orig_dim/2^n)
h_pass = y
z_enc = []
h_pass, logdet = self.scale_blocks[0](h_pass, logdet, False) #dequantisation
for i in range(1, self.scales+1):
if i==self.scales:
h_split, logdet = self.scale_blocks[i](h_pass, logdet, False)
else:
h, logdet = self.scale_blocks[i](h_pass, logdet, False)
h_split, h_pass = h.chunk(2, dim=1)
logprior+=self.prior.log_prob(h_split).sum(dim = [i+1 for i in range(self.dim+1)])
z_enc.append(h_split)
return z_enc, logprior, logdet
def decode(self, z:list, logdet):
#z is a list of the latent tensors of the different scales.
#The tensors of different scales have been put in an ascending order
#z = [h_split(1st scale)-size:D/2, ..., h_split(nth scale)-size:D/2^n]
h_pass=None
for i in range(self.scales):
h_split = z[self.scales-1-i]
if h_pass==None:
concat_pass = h_split
else:
concat_pass = torch.cat([h_split, h_pass], dim=1)
h_pass, logdet = self.scale_blocks[self.scales-i](concat_pass, logdet, True)
h_pass, logdet = self.scale_blocks[0](h_pass, logdet, True) #quantisation
return h_pass, logdet
"""
#instantiate the unconditional flow (quants/vardeq_depth/coupling_type/nn_settings below are
#placeholders -- fill them in from your experiment config)
rflow = UnconditionalFlow(channels=1, dim=3, resolution=64, scales=4, scale_depth=3,
                          quants=256, vardeq_depth=None, coupling_type='affine', nn_settings=nn_settings)
y = torch.randn((2, 1, 64, 64, 64), dtype=torch.float32)
print('y shape: ', y.size())
print('Encoding y with the forward pass...We get z_enc (same dimensionality)')
z_enc, logprior, logdet = rflow(y=y)
print('z_enc elements:')
for i, elem in enumerate(z_enc):
print(i, elem.size())
print('logprior size: ', logprior.size())
print('logdet size: ', logdet.size())
print('Decoding y_dec from its z_enc encoding... We pass z_enc through the backward pass.')
y_dec, logdet = rflow(z=z_enc, reverse=True)
print('y_dec size:', y_dec.size())
r = torch.abs(y-y_dec)
print('sum(|y-y_dec|)',torch.sum(r))
print('mean(|y-y_dec|):',torch.mean(r))
"""
| 38.179104 | 125 | 0.60516 |
import torch.nn as nn
import torch
from caflow.models.modules.blocks.FlowBlock import FlowBlock
from caflow.models.modules.blocks.Dequantisation import Dequantisation, VariationalDequantization
class UnconditionalFlow(nn.Module):
def __init__(self, channels, dim, resolution, scales, scale_depth, quants, vardeq_depth, coupling_type, nn_settings):
super(UnconditionalFlow, self).__init__()
self.channels = channels
self.dim = dim
self.resolution = resolution
self.scales = scales
self.scale_blocks = nn.ModuleList()
if vardeq_depth is None:
self.scale_blocks.append(Dequantisation(dim=dim, quants=quants))
else:
self.scale_blocks.append(VariationalDequantization(channels=channels, depth=vardeq_depth, dim=dim, \
resolution=self.calculate_resolution(dim, 0),\
quants=quants, coupling_type=coupling_type, nn_settings=nn_settings))
for scale in range(self.scales):
scale_channels = self.calculate_scale_channels(dim, scale)
resolution = self.calculate_resolution(dim, scale)
self.scale_blocks.append(FlowBlock(channels = scale_channels, dim = dim,
resolution=resolution, depth = scale_depth,
coupling_type=coupling_type, nn_settings=nn_settings))
self.prior = torch.distributions.normal.Normal(loc=0.0, scale=1.0)
def calculate_resolution(self, dim, scale):
if isinstance(self.resolution, int):
resolution = tuple([self.resolution//2**scale for _ in range(self.dim)])
else:
resolution = tuple([x//2**scale for x in self.resolution])
return resolution
def calculate_scale_channels(self, dim, scale):
if scale==0:
return 2 ** (dim * scale) * self.channels
else:
return 2 ** ((dim-1) * scale) * self.channels
def forward(self, y=None, z=[], logprior=0., logdet=0., reverse=False):
if reverse:
y_dec, logdet = self.decode(z, logdet=logdet)
return y_dec, logdet
else:
z_enc, logprior, logdet = self.encode(y, logprior=logprior, logdet=logdet)
return z_enc, logprior, logdet
def encode(self, y, logprior, logdet):
h_pass = y
z_enc = []
h_pass, logdet = self.scale_blocks[0](h_pass, logdet, False)
for i in range(1, self.scales+1):
if i==self.scales:
h_split, logdet = self.scale_blocks[i](h_pass, logdet, False)
else:
h, logdet = self.scale_blocks[i](h_pass, logdet, False)
h_split, h_pass = h.chunk(2, dim=1)
logprior+=self.prior.log_prob(h_split).sum(dim = [i+1 for i in range(self.dim+1)])
z_enc.append(h_split)
return z_enc, logprior, logdet
def decode(self, z:list, logdet):
h_pass=None
for i in range(self.scales):
h_split = z[self.scales-1-i]
if h_pass==None:
concat_pass = h_split
else:
concat_pass = torch.cat([h_split, h_pass], dim=1)
h_pass, logdet = self.scale_blocks[self.scales-i](concat_pass, logdet, True)
h_pass, logdet = self.scale_blocks[0](h_pass, logdet, True)
return h_pass, logdet
| true | true |
f71f1590d805d8b9c18634bc1e020d1aa22e3c29 | 2,037 | py | Python | python/coroutines/cofollow.py | ASMlover/study | 5878f862573061f94c5776a351e30270dfd9966a | [
"BSD-2-Clause"
] | 22 | 2015-05-18T07:04:36.000Z | 2021-08-02T03:01:43.000Z | python/coroutines/cofollow.py | ASMlover/study | 5878f862573061f94c5776a351e30270dfd9966a | [
"BSD-2-Clause"
] | 1 | 2017-08-31T22:13:57.000Z | 2017-09-05T15:00:25.000Z | python/coroutines/cofollow.py | ASMlover/study | 5878f862573061f94c5776a351e30270dfd9966a | [
"BSD-2-Clause"
] | 6 | 2015-06-06T07:16:12.000Z | 2021-07-06T13:45:56.000Z | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2020 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import time
import typing
import coroutine
from typing import Any, Generator, TextIO
def follow(fp:TextIO, target:Generator[None, str, None], from_end:bool=False) \
-> None:
from_end and fp.seek(0, 2)
while True:
line = fp.readline()
if not line:
time.sleep(0.1)
continue
target.send(line)
@coroutine.coroutine
def printer() -> Generator[None, str, None]:
while True:
line = (yield)
    print(line, end='')
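# The decorator applied to printer() above comes from the local `coroutine` helper module and is
# presumed to be the usual generator-priming decorator from Beazley's coroutine examples, i.e.
# roughly the following (a sketch of an assumption, not this repo's actual helper code):
#
#   from functools import wraps
#
#   def coroutine(func):
#       @wraps(func)
#       def primed(*args, **kwargs):
#           gen = func(*args, **kwargs)
#           next(gen)          # advance to the first yield so send() works immediately
#           return gen
#       return primed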
if __name__ == '__main__':
fname = sys.argv[1] if len(sys.argv) > 1 else 'cofollow.py'
with open(fname) as fp:
follow(fp, printer())
| 35.12069 | 79 | 0.719686 |
import sys
import time
import typing
import coroutine
from typing import Any, Generator, TextIO
def follow(fp:TextIO, target:Generator[None, str, None], from_end:bool=False) \
-> None:
from_end and fp.seek(0, 2)
while True:
line = fp.readline()
if not line:
time.sleep(0.1)
continue
target.send(line)
@coroutine.coroutine
def printer() -> Generator[None, str, None]:
while True:
line = (yield)
    print(line, end='')
if __name__ == '__main__':
fname = sys.argv[1] if len(sys.argv) > 1 else 'cofollow.py'
with open(fname) as fp:
follow(fp, printer())
| true | true |
f71f1602d9340fa6aa90aeee7b17c3e2f020ff60 | 3,630 | py | Python | LaserTagger/Models/TransformerCRF_V2.py | tech-srl/c3po | ce1e002bf9d026c10fbd2c178d454ebb76cb7a94 | [
"MIT"
] | 18 | 2020-11-13T02:43:58.000Z | 2022-01-04T08:11:05.000Z | LaserTagger/Models/TransformerCRF_V2.py | shaileshj2803/c3po | a673a0514ee8c800efa12574ef8da3fcb8ef73b7 | [
"MIT"
] | 2 | 2021-03-11T01:19:55.000Z | 2021-05-21T15:01:42.000Z | LaserTagger/Models/TransformerCRF_V2.py | shaileshj2803/c3po | a673a0514ee8c800efa12574ef8da3fcb8ef73b7 | [
"MIT"
] | 6 | 2021-02-25T06:07:06.000Z | 2021-05-21T23:44:45.000Z | from torch import nn
from Models.CRF import CRF
from Models.Transformer import Transformer
from Models.TransformerCtx import TransformerCtx
from Models.SequenceEncoder import SequenceEncoder
from Models.Attention import Attention
import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
class Transformer_CRF(nn.Module):
def __init__(self, vocab_size, ctx_vocab_size, nb_labels, emb_dim, hidden_dim, bos_idx, eos_idx, pad_idx, num_lstm_layers, dropout, device):
super().__init__()
self.transformer = Transformer(
vocab_size, in_dim=emb_dim, nb_labels=nb_labels, dropout=dropout
)
self.crf = CRF(
nb_labels,
device,
bos_idx,
eos_idx,
pad_tag_id=pad_idx,
batch_first=True,
)
self.ctx_encoder = TransformerCtx(ctx_vocab_size, device=device, in_dim=emb_dim)
self.ctx_combiner = Attention(emb_dim)
self.query = nn.Parameter(torch.Tensor(1, emb_dim))
torch.nn.init.xavier_uniform_(self.query.data)
self.emb_dim = emb_dim
self.ctx_linear = nn.Linear(2 * emb_dim, emb_dim)
def combine_ctx(self, x, before_ctx, after_ctx):
# (batch, h_dim)
before_ctx_encoded = self.before_ctx_encoder(before_ctx)
after_ctx_encoded = self.after_ctx_encoder(after_ctx)
# (batch, 2 * h_dim)
ctx_cat = torch.cat((before_ctx_encoded, after_ctx_encoded), dim=1)
# (batch, h_dim)
encoded_ctx = torch.tanh(self.ctx_linear(ctx_cat))
seq_len = x.shape[1]
        # (seq_len, batch, h_dim)
encoded_ctx_repeated = encoded_ctx.unsqueeze(dim=0).repeat(seq_len, 1, 1)
return encoded_ctx_repeated
def forward_ctx(self, x, before_ctx, after_ctx):
batch_size = x.shape[0]
# (batch_size, 1, emb_dim)
query = self.query.expand(batch_size, self.emb_dim).unsqueeze(dim=1)
packed_query = pack_padded_sequence(query, batch_size * [1], batch_first=True, enforce_sorted=False)
# Packed sequence (before_ctx_length, batch_size, emb_dim)
encoded_before_ctx = self.ctx_encoder(before_ctx)
# (batch_size, 1, emb_dim)
encoded_before_ctx, _ = self.ctx_combiner(packed_query, encoded_before_ctx)
# Packed sequence (after_ctx_length, batch_size, emb_dim)
encoded_after_ctx = self.ctx_encoder(after_ctx)
# (batch_size, 1 ,emb_dim)
encoded_after_ctx, _ = self.ctx_combiner(packed_query, encoded_after_ctx)
# (batch_size ,emb_dim)
combined_ctx = self.ctx_linear(torch.cat([encoded_before_ctx, encoded_after_ctx], dim=2).squeeze())
# (1, batch_size ,emb_dim)
combined_ctx = combined_ctx.unsqueeze(dim=0)
seq_len = x.shape[1]
# (seq_len, batch_size, emb_dim)
combined_ctx = combined_ctx.repeat(seq_len, 1, 1)
return combined_ctx
def forward(self, x, before_ctx, after_ctx, mask=None):
# (seq_len, batch_size, emb_dim)
combined_ctx = self.forward_ctx(x, before_ctx, after_ctx)
# (batch_size, src_length, num_labels)
emissions = self.transformer(x, combined_ctx, mask)
score, path = self.crf.decode(emissions, mask=mask)
return score, path
def loss(self, x, before_ctx, after_ctx, y, mask=None):
# (seq_len, batch_size, emb_dim)
combined_ctx = self.forward_ctx(x, before_ctx, after_ctx)
# (batch_size, src_length, num_labels)
emissions = self.transformer(x, combined_ctx, mask)
nll = self.crf(emissions, y, mask=mask)
return nll
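# Hedged training-step sketch (names and shapes below are assumptions, not part of this module):
# the intended usage is to minimise the CRF negative log-likelihood during training and to decode
# the best tag sequence at inference time, roughly:
#
#   model = Transformer_CRF(vocab_size, ctx_vocab_size, nb_labels, emb_dim, hidden_dim,
#                           bos_idx, eos_idx, pad_idx, num_lstm_layers, dropout, device)
#   for x, before_ctx, after_ctx, y, mask in loader:          # shapes follow the dataset used
#       nll = model.loss(x, before_ctx, after_ctx, y, mask=mask)
#       nll.backward()
#       optimizer.step(); optimizer.zero_grad()
#   score, path = model(x, before_ctx, after_ctx, mask=mask)  # Viterbi decode via the CRF layer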
| 42.705882 | 144 | 0.674656 | from torch import nn
from Models.CRF import CRF
from Models.Transformer import Transformer
from Models.TransformerCtx import TransformerCtx
from Models.SequenceEncoder import SequenceEncoder
from Models.Attention import Attention
import torch
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
class Transformer_CRF(nn.Module):
def __init__(self, vocab_size, ctx_vocab_size, nb_labels, emb_dim, hidden_dim, bos_idx, eos_idx, pad_idx, num_lstm_layers, dropout, device):
super().__init__()
self.transformer = Transformer(
vocab_size, in_dim=emb_dim, nb_labels=nb_labels, dropout=dropout
)
self.crf = CRF(
nb_labels,
device,
bos_idx,
eos_idx,
pad_tag_id=pad_idx,
batch_first=True,
)
self.ctx_encoder = TransformerCtx(ctx_vocab_size, device=device, in_dim=emb_dim)
self.ctx_combiner = Attention(emb_dim)
self.query = nn.Parameter(torch.Tensor(1, emb_dim))
torch.nn.init.xavier_uniform_(self.query.data)
self.emb_dim = emb_dim
self.ctx_linear = nn.Linear(2 * emb_dim, emb_dim)
def combine_ctx(self, x, before_ctx, after_ctx):
before_ctx_encoded = self.before_ctx_encoder(before_ctx)
after_ctx_encoded = self.after_ctx_encoder(after_ctx)
ctx_cat = torch.cat((before_ctx_encoded, after_ctx_encoded), dim=1)
encoded_ctx = torch.tanh(self.ctx_linear(ctx_cat))
seq_len = x.shape[1]
encoded_ctx_repeated = encoded_ctx.unsqueeze(dim=0).repeat(seq_len, 1, 1)
return encoded_ctx_repeated
def forward_ctx(self, x, before_ctx, after_ctx):
batch_size = x.shape[0]
query = self.query.expand(batch_size, self.emb_dim).unsqueeze(dim=1)
packed_query = pack_padded_sequence(query, batch_size * [1], batch_first=True, enforce_sorted=False)
encoded_before_ctx = self.ctx_encoder(before_ctx)
encoded_before_ctx, _ = self.ctx_combiner(packed_query, encoded_before_ctx)
encoded_after_ctx = self.ctx_encoder(after_ctx)
encoded_after_ctx, _ = self.ctx_combiner(packed_query, encoded_after_ctx)
combined_ctx = self.ctx_linear(torch.cat([encoded_before_ctx, encoded_after_ctx], dim=2).squeeze())
combined_ctx = combined_ctx.unsqueeze(dim=0)
seq_len = x.shape[1]
combined_ctx = combined_ctx.repeat(seq_len, 1, 1)
return combined_ctx
def forward(self, x, before_ctx, after_ctx, mask=None):
combined_ctx = self.forward_ctx(x, before_ctx, after_ctx)
emissions = self.transformer(x, combined_ctx, mask)
score, path = self.crf.decode(emissions, mask=mask)
return score, path
def loss(self, x, before_ctx, after_ctx, y, mask=None):
combined_ctx = self.forward_ctx(x, before_ctx, after_ctx)
emissions = self.transformer(x, combined_ctx, mask)
nll = self.crf(emissions, y, mask=mask)
return nll
| true | true |
f71f16308749cbb3a9e24291b63fd1302e0d5211 | 39 | py | Python | run.py | jovanzac/Captain | 3e410aa22eec4f72274b9bf4f0f2b3c91936356d | [
"MIT"
] | null | null | null | run.py | jovanzac/Captain | 3e410aa22eec4f72274b9bf4f0f2b3c91936356d | [
"MIT"
] | null | null | null | run.py | jovanzac/Captain | 3e410aa22eec4f72274b9bf4f0f2b3c91936356d | [
"MIT"
] | 1 | 2020-12-25T08:21:37.000Z | 2020-12-25T08:21:37.000Z | """The entry point."""
import Scripts
| 9.75 | 22 | 0.666667 |
import Scripts
| true | true |
f71f183d4028a3c36013d6614421819a0cd6f672 | 11,436 | py | Python | facebook_business/adobjects/adaccounttargetingunified.py | s-nez/facebook-python-business-sdk | 4766644c7585d2e262463862f8aae26d5bea2615 | [
"CNRI-Python"
] | null | null | null | facebook_business/adobjects/adaccounttargetingunified.py | s-nez/facebook-python-business-sdk | 4766644c7585d2e262463862f8aae26d5bea2615 | [
"CNRI-Python"
] | null | null | null | facebook_business/adobjects/adaccounttargetingunified.py | s-nez/facebook-python-business-sdk | 4766644c7585d2e262463862f8aae26d5bea2615 | [
"CNRI-Python"
] | null | null | null | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject
from facebook_business.adobjects.objectparser import ObjectParser
from facebook_business.api import FacebookRequest
from facebook_business.typechecker import TypeChecker
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix it in our codegen framework. We'll not be able to accept
pull requests for this class.
"""
class AdAccountTargetingUnified(
AbstractCrudObject,
):
def __init__(self, fbid=None, parent_id=None, api=None):
self._isAdAccountTargetingUnified = True
super(AdAccountTargetingUnified, self).__init__(fbid, parent_id, api)
class Field(AbstractObject.Field):
audience_size = 'audience_size'
conversion_lift = 'conversion_lift'
description = 'description'
id = 'id'
img = 'img'
info = 'info'
info_title = 'info_title'
is_recommendation = 'is_recommendation'
key = 'key'
link = 'link'
name = 'name'
parent = 'parent'
partner = 'partner'
path = 'path'
performance_rating = 'performance_rating'
raw_name = 'raw_name'
recommendation_model = 'recommendation_model'
search_interest_id = 'search_interest_id'
source = 'source'
spend = 'spend'
type = 'type'
valid = 'valid'
class LimitType:
behaviors = 'behaviors'
college_years = 'college_years'
education_majors = 'education_majors'
education_schools = 'education_schools'
education_statuses = 'education_statuses'
ethnic_affinity = 'ethnic_affinity'
family_statuses = 'family_statuses'
generation = 'generation'
home_ownership = 'home_ownership'
home_type = 'home_type'
home_value = 'home_value'
household_composition = 'household_composition'
income = 'income'
industries = 'industries'
interested_in = 'interested_in'
interests = 'interests'
life_events = 'life_events'
location_categories = 'location_categories'
moms = 'moms'
net_worth = 'net_worth'
office_type = 'office_type'
politics = 'politics'
relationship_statuses = 'relationship_statuses'
user_adclusters = 'user_adclusters'
work_employers = 'work_employers'
work_positions = 'work_positions'
class RegulatedCategories:
credit = 'CREDIT'
employment = 'EMPLOYMENT'
housing = 'HOUSING'
issues_elections_politics = 'ISSUES_ELECTIONS_POLITICS'
none = 'NONE'
class WhitelistedTypes:
adgroup_id = 'adgroup_id'
age_max = 'age_max'
age_min = 'age_min'
alternate_auto_targeting_option = 'alternate_auto_targeting_option'
app_install_state = 'app_install_state'
audience_network_positions = 'audience_network_positions'
behaviors = 'behaviors'
brand_safety_content_filter_levels = 'brand_safety_content_filter_levels'
brand_safety_content_severity_levels = 'brand_safety_content_severity_levels'
catalog_based_targeting = 'catalog_based_targeting'
cities = 'cities'
college_years = 'college_years'
conjunctive_user_adclusters = 'conjunctive_user_adclusters'
connections = 'connections'
contextual_targeting_categories = 'contextual_targeting_categories'
countries = 'countries'
country = 'country'
country_groups = 'country_groups'
custom_audiences = 'custom_audiences'
device_platforms = 'device_platforms'
direct_install_devices = 'direct_install_devices'
dynamic_audience_ids = 'dynamic_audience_ids'
education_majors = 'education_majors'
education_schools = 'education_schools'
education_statuses = 'education_statuses'
effective_audience_network_positions = 'effective_audience_network_positions'
effective_device_platforms = 'effective_device_platforms'
effective_facebook_positions = 'effective_facebook_positions'
effective_instagram_positions = 'effective_instagram_positions'
effective_messenger_positions = 'effective_messenger_positions'
effective_publisher_platforms = 'effective_publisher_platforms'
effective_whatsapp_positions = 'effective_whatsapp_positions'
engagement_specs = 'engagement_specs'
ethnic_affinity = 'ethnic_affinity'
exclude_previous_days = 'exclude_previous_days'
exclude_reached_since = 'exclude_reached_since'
excluded_brand_safety_content_types = 'excluded_brand_safety_content_types'
excluded_connections = 'excluded_connections'
excluded_custom_audiences = 'excluded_custom_audiences'
excluded_dynamic_audience_ids = 'excluded_dynamic_audience_ids'
excluded_engagement_specs = 'excluded_engagement_specs'
excluded_geo_locations = 'excluded_geo_locations'
excluded_mobile_device_model = 'excluded_mobile_device_model'
excluded_product_audience_specs = 'excluded_product_audience_specs'
excluded_publisher_categories = 'excluded_publisher_categories'
excluded_publisher_list_ids = 'excluded_publisher_list_ids'
excluded_user_adclusters = 'excluded_user_adclusters'
excluded_user_device = 'excluded_user_device'
exclusions = 'exclusions'
facebook_positions = 'facebook_positions'
family_statuses = 'family_statuses'
fb_deal_id = 'fb_deal_id'
flexible_spec = 'flexible_spec'
follow_profiles = 'follow_profiles'
follow_profiles_negative = 'follow_profiles_negative'
format = 'format'
friends_of_connections = 'friends_of_connections'
gatekeepers = 'gatekeepers'
genders = 'genders'
generation = 'generation'
geo_locations = 'geo_locations'
home_ownership = 'home_ownership'
home_type = 'home_type'
home_value = 'home_value'
household_composition = 'household_composition'
id = 'id'
income = 'income'
industries = 'industries'
instagram_positions = 'instagram_positions'
instream_video_sponsorship_placements = 'instream_video_sponsorship_placements'
interest_defaults_source = 'interest_defaults_source'
interested_in = 'interested_in'
interests = 'interests'
is_instagram_destination_ad = 'is_instagram_destination_ad'
is_whatsapp_destination_ad = 'is_whatsapp_destination_ad'
keywords = 'keywords'
life_events = 'life_events'
locales = 'locales'
location_categories = 'location_categories'
location_cluster_ids = 'location_cluster_ids'
location_expansion = 'location_expansion'
marketplace_product_categories = 'marketplace_product_categories'
messenger_positions = 'messenger_positions'
mobile_device_model = 'mobile_device_model'
moms = 'moms'
net_worth = 'net_worth'
office_type = 'office_type'
page_types = 'page_types'
place_page_set_ids = 'place_page_set_ids'
political_views = 'political_views'
politics = 'politics'
product_audience_specs = 'product_audience_specs'
prospecting_audience = 'prospecting_audience'
publisher_platforms = 'publisher_platforms'
radius = 'radius'
regions = 'regions'
relationship_statuses = 'relationship_statuses'
rtb_flag = 'rtb_flag'
site_category = 'site_category'
targeting_optimization = 'targeting_optimization'
timezones = 'timezones'
topic = 'topic'
trending = 'trending'
user_adclusters = 'user_adclusters'
user_device = 'user_device'
user_event = 'user_event'
user_os = 'user_os'
user_page_threads = 'user_page_threads'
user_page_threads_excluded = 'user_page_threads_excluded'
whatsapp_positions = 'whatsapp_positions'
wireless_carrier = 'wireless_carrier'
work_employers = 'work_employers'
work_positions = 'work_positions'
zips = 'zips'
class Mode:
best_performing = 'best_performing'
recently_used = 'recently_used'
related = 'related'
suggestions = 'suggestions'
class Objective:
app_installs = 'APP_INSTALLS'
brand_awareness = 'BRAND_AWARENESS'
conversions = 'CONVERSIONS'
event_responses = 'EVENT_RESPONSES'
lead_generation = 'LEAD_GENERATION'
link_clicks = 'LINK_CLICKS'
local_awareness = 'LOCAL_AWARENESS'
messages = 'MESSAGES'
offer_claims = 'OFFER_CLAIMS'
page_likes = 'PAGE_LIKES'
post_engagement = 'POST_ENGAGEMENT'
product_catalog_sales = 'PRODUCT_CATALOG_SALES'
reach = 'REACH'
video_views = 'VIDEO_VIEWS'
_field_types = {
'audience_size': 'unsigned int',
'conversion_lift': 'float',
'description': 'string',
'id': 'string',
'img': 'string',
'info': 'string',
'info_title': 'string',
'is_recommendation': 'bool',
'key': 'string',
'link': 'string',
'name': 'string',
'parent': 'string',
'partner': 'string',
'path': 'list<string>',
'performance_rating': 'unsigned int',
'raw_name': 'string',
'recommendation_model': 'string',
'search_interest_id': 'string',
'source': 'string',
'spend': 'float',
'type': 'string',
'valid': 'bool',
}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
field_enum_info['LimitType'] = AdAccountTargetingUnified.LimitType.__dict__.values()
field_enum_info['RegulatedCategories'] = AdAccountTargetingUnified.RegulatedCategories.__dict__.values()
field_enum_info['WhitelistedTypes'] = AdAccountTargetingUnified.WhitelistedTypes.__dict__.values()
field_enum_info['Mode'] = AdAccountTargetingUnified.Mode.__dict__.values()
field_enum_info['Objective'] = AdAccountTargetingUnified.Objective.__dict__.values()
return field_enum_info
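if __name__ == '__main__':
    # Illustrative smoke check (not part of the generated SDK class): enumerate the constant
    # groups this class exposes without making any Graph API calls.
    enum_info = AdAccountTargetingUnified._get_field_enum_info()
    for enum_name, values in enum_info.items():
        print('{}: {} values'.format(enum_name, len(list(values))))
    print('example field constant:', AdAccountTargetingUnified.Field.audience_size)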
| 41.585455 | 112 | 0.690976 |
from facebook_business.adobjects.abstractobject import AbstractObject
from facebook_business.adobjects.abstractcrudobject import AbstractCrudObject
from facebook_business.adobjects.objectparser import ObjectParser
from facebook_business.api import FacebookRequest
from facebook_business.typechecker import TypeChecker
class AdAccountTargetingUnified(
AbstractCrudObject,
):
def __init__(self, fbid=None, parent_id=None, api=None):
self._isAdAccountTargetingUnified = True
super(AdAccountTargetingUnified, self).__init__(fbid, parent_id, api)
class Field(AbstractObject.Field):
audience_size = 'audience_size'
conversion_lift = 'conversion_lift'
description = 'description'
id = 'id'
img = 'img'
info = 'info'
info_title = 'info_title'
is_recommendation = 'is_recommendation'
key = 'key'
link = 'link'
name = 'name'
parent = 'parent'
partner = 'partner'
path = 'path'
performance_rating = 'performance_rating'
raw_name = 'raw_name'
recommendation_model = 'recommendation_model'
search_interest_id = 'search_interest_id'
source = 'source'
spend = 'spend'
type = 'type'
valid = 'valid'
class LimitType:
behaviors = 'behaviors'
college_years = 'college_years'
education_majors = 'education_majors'
education_schools = 'education_schools'
education_statuses = 'education_statuses'
ethnic_affinity = 'ethnic_affinity'
family_statuses = 'family_statuses'
generation = 'generation'
home_ownership = 'home_ownership'
home_type = 'home_type'
home_value = 'home_value'
household_composition = 'household_composition'
income = 'income'
industries = 'industries'
interested_in = 'interested_in'
interests = 'interests'
life_events = 'life_events'
location_categories = 'location_categories'
moms = 'moms'
net_worth = 'net_worth'
office_type = 'office_type'
politics = 'politics'
relationship_statuses = 'relationship_statuses'
user_adclusters = 'user_adclusters'
work_employers = 'work_employers'
work_positions = 'work_positions'
class RegulatedCategories:
credit = 'CREDIT'
employment = 'EMPLOYMENT'
housing = 'HOUSING'
issues_elections_politics = 'ISSUES_ELECTIONS_POLITICS'
none = 'NONE'
class WhitelistedTypes:
adgroup_id = 'adgroup_id'
age_max = 'age_max'
age_min = 'age_min'
alternate_auto_targeting_option = 'alternate_auto_targeting_option'
app_install_state = 'app_install_state'
audience_network_positions = 'audience_network_positions'
behaviors = 'behaviors'
brand_safety_content_filter_levels = 'brand_safety_content_filter_levels'
brand_safety_content_severity_levels = 'brand_safety_content_severity_levels'
catalog_based_targeting = 'catalog_based_targeting'
cities = 'cities'
college_years = 'college_years'
conjunctive_user_adclusters = 'conjunctive_user_adclusters'
connections = 'connections'
contextual_targeting_categories = 'contextual_targeting_categories'
countries = 'countries'
country = 'country'
country_groups = 'country_groups'
custom_audiences = 'custom_audiences'
device_platforms = 'device_platforms'
direct_install_devices = 'direct_install_devices'
dynamic_audience_ids = 'dynamic_audience_ids'
education_majors = 'education_majors'
education_schools = 'education_schools'
education_statuses = 'education_statuses'
effective_audience_network_positions = 'effective_audience_network_positions'
effective_device_platforms = 'effective_device_platforms'
effective_facebook_positions = 'effective_facebook_positions'
effective_instagram_positions = 'effective_instagram_positions'
effective_messenger_positions = 'effective_messenger_positions'
effective_publisher_platforms = 'effective_publisher_platforms'
effective_whatsapp_positions = 'effective_whatsapp_positions'
engagement_specs = 'engagement_specs'
ethnic_affinity = 'ethnic_affinity'
exclude_previous_days = 'exclude_previous_days'
exclude_reached_since = 'exclude_reached_since'
excluded_brand_safety_content_types = 'excluded_brand_safety_content_types'
excluded_connections = 'excluded_connections'
excluded_custom_audiences = 'excluded_custom_audiences'
excluded_dynamic_audience_ids = 'excluded_dynamic_audience_ids'
excluded_engagement_specs = 'excluded_engagement_specs'
excluded_geo_locations = 'excluded_geo_locations'
excluded_mobile_device_model = 'excluded_mobile_device_model'
excluded_product_audience_specs = 'excluded_product_audience_specs'
excluded_publisher_categories = 'excluded_publisher_categories'
excluded_publisher_list_ids = 'excluded_publisher_list_ids'
excluded_user_adclusters = 'excluded_user_adclusters'
excluded_user_device = 'excluded_user_device'
exclusions = 'exclusions'
facebook_positions = 'facebook_positions'
family_statuses = 'family_statuses'
fb_deal_id = 'fb_deal_id'
flexible_spec = 'flexible_spec'
follow_profiles = 'follow_profiles'
follow_profiles_negative = 'follow_profiles_negative'
format = 'format'
friends_of_connections = 'friends_of_connections'
gatekeepers = 'gatekeepers'
genders = 'genders'
generation = 'generation'
geo_locations = 'geo_locations'
home_ownership = 'home_ownership'
home_type = 'home_type'
home_value = 'home_value'
household_composition = 'household_composition'
id = 'id'
income = 'income'
industries = 'industries'
instagram_positions = 'instagram_positions'
instream_video_sponsorship_placements = 'instream_video_sponsorship_placements'
interest_defaults_source = 'interest_defaults_source'
interested_in = 'interested_in'
interests = 'interests'
is_instagram_destination_ad = 'is_instagram_destination_ad'
is_whatsapp_destination_ad = 'is_whatsapp_destination_ad'
keywords = 'keywords'
life_events = 'life_events'
locales = 'locales'
location_categories = 'location_categories'
location_cluster_ids = 'location_cluster_ids'
location_expansion = 'location_expansion'
marketplace_product_categories = 'marketplace_product_categories'
messenger_positions = 'messenger_positions'
mobile_device_model = 'mobile_device_model'
moms = 'moms'
net_worth = 'net_worth'
office_type = 'office_type'
page_types = 'page_types'
place_page_set_ids = 'place_page_set_ids'
political_views = 'political_views'
politics = 'politics'
product_audience_specs = 'product_audience_specs'
prospecting_audience = 'prospecting_audience'
publisher_platforms = 'publisher_platforms'
radius = 'radius'
regions = 'regions'
relationship_statuses = 'relationship_statuses'
rtb_flag = 'rtb_flag'
site_category = 'site_category'
targeting_optimization = 'targeting_optimization'
timezones = 'timezones'
topic = 'topic'
trending = 'trending'
user_adclusters = 'user_adclusters'
user_device = 'user_device'
user_event = 'user_event'
user_os = 'user_os'
user_page_threads = 'user_page_threads'
user_page_threads_excluded = 'user_page_threads_excluded'
whatsapp_positions = 'whatsapp_positions'
wireless_carrier = 'wireless_carrier'
work_employers = 'work_employers'
work_positions = 'work_positions'
zips = 'zips'
class Mode:
best_performing = 'best_performing'
recently_used = 'recently_used'
related = 'related'
suggestions = 'suggestions'
class Objective:
app_installs = 'APP_INSTALLS'
brand_awareness = 'BRAND_AWARENESS'
conversions = 'CONVERSIONS'
event_responses = 'EVENT_RESPONSES'
lead_generation = 'LEAD_GENERATION'
link_clicks = 'LINK_CLICKS'
local_awareness = 'LOCAL_AWARENESS'
messages = 'MESSAGES'
offer_claims = 'OFFER_CLAIMS'
page_likes = 'PAGE_LIKES'
post_engagement = 'POST_ENGAGEMENT'
product_catalog_sales = 'PRODUCT_CATALOG_SALES'
reach = 'REACH'
video_views = 'VIDEO_VIEWS'
_field_types = {
'audience_size': 'unsigned int',
'conversion_lift': 'float',
'description': 'string',
'id': 'string',
'img': 'string',
'info': 'string',
'info_title': 'string',
'is_recommendation': 'bool',
'key': 'string',
'link': 'string',
'name': 'string',
'parent': 'string',
'partner': 'string',
'path': 'list<string>',
'performance_rating': 'unsigned int',
'raw_name': 'string',
'recommendation_model': 'string',
'search_interest_id': 'string',
'source': 'string',
'spend': 'float',
'type': 'string',
'valid': 'bool',
}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
field_enum_info['LimitType'] = AdAccountTargetingUnified.LimitType.__dict__.values()
field_enum_info['RegulatedCategories'] = AdAccountTargetingUnified.RegulatedCategories.__dict__.values()
field_enum_info['WhitelistedTypes'] = AdAccountTargetingUnified.WhitelistedTypes.__dict__.values()
field_enum_info['Mode'] = AdAccountTargetingUnified.Mode.__dict__.values()
field_enum_info['Objective'] = AdAccountTargetingUnified.Objective.__dict__.values()
return field_enum_info
| true | true |
f71f1873191d210df18e5af80874ac079e4f1b83 | 11,453 | py | Python | lib/rucio/tests/test_scope.py | TeAmP0is0N/rucio | 45c1b83f8e1514953a41fd076b4e651dd564c39f | [
"Apache-2.0"
] | null | null | null | lib/rucio/tests/test_scope.py | TeAmP0is0N/rucio | 45c1b83f8e1514953a41fd076b4e651dd564c39f | [
"Apache-2.0"
] | null | null | null | lib/rucio/tests/test_scope.py | TeAmP0is0N/rucio | 45c1b83f8e1514953a41fd076b4e651dd564c39f | [
"Apache-2.0"
] | null | null | null | # Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Thomas Beermann, <thomas.beermann@cern.ch>, 2012
# - Angelos Molfetas, <angelos.molfetas@cern.ch>, 2012
# - Mario Lassnig, <mario.lassnig@cern.ch>, 2012
# - Vincent Garonne, <vincent.garonne@cern.ch>, 2012-2015
# - Cedric Serfon, <cedric.serfon@cern.ch>, 2017
# - Andrew Lister, <andrew.lister@stfc.ac.uk>, 2019
# - Patrick Austin <patrick.austin@stfc.ac.uk>, 2020
from json import dumps, loads
from paste.fixture import TestApp
from nose.tools import assert_equal, assert_true, assert_in, raises, assert_raises
from rucio.client.accountclient import AccountClient
from rucio.client.scopeclient import ScopeClient
from rucio.common.config import config_get, config_get_bool
from rucio.common.exception import AccountNotFound, Duplicate, ScopeNotFound, InvalidObject
from rucio.common.types import InternalAccount, InternalScope
from rucio.common.utils import generate_uuid as uuid
from rucio.core.scope import get_scopes, add_scope, is_scope_owner
from rucio.tests.common import account_name_generator, scope_name_generator
from rucio.web.rest.account import APP as account_app
from rucio.web.rest.authentication import APP as auth_app
class TestScopeCoreApi():
def __init__(self):
if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
self.vo = {'vo': config_get('client', 'vo', raise_exception=False, default='tst')}
else:
self.vo = {}
self.scopes = [InternalScope(scope_name_generator(), **self.vo) for _ in range(5)]
self.jdoe = InternalAccount('jdoe', **self.vo)
def test_list_scopes(self):
""" SCOPE (CORE): List scopes """
for scope in self.scopes:
add_scope(scope=scope, account=self.jdoe)
scopes = get_scopes(account=self.jdoe)
for scope in scopes:
assert_in(scope, scopes)
def test_is_scope_owner(self):
""" SCOPE (CORE): Is scope owner """
scope = InternalScope(scope_name_generator(), **self.vo)
add_scope(scope=scope, account=self.jdoe)
answer = is_scope_owner(scope=scope, account=self.jdoe)
assert_equal(answer, True)
class TestScope():
def __init__(self):
if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
self.vo_header = {'X-Rucio-VO': config_get('client', 'vo', raise_exception=False, default='tst')}
else:
self.vo_header = {}
self.scopes = [scope_name_generator() for _ in range(5)]
def test_scope_success(self):
""" SCOPE (REST): send a POST to create a new account and scope """
mw = []
headers1 = {'X-Rucio-Account': 'root', 'X-Rucio-Username': 'ddmlab', 'X-Rucio-Password': 'secret'}
headers1.update(self.vo_header)
res1 = TestApp(auth_app.wsgifunc(*mw)).get('/userpass', headers=headers1, expect_errors=True)
assert_equal(res1.status, 200)
token = str(res1.header('X-Rucio-Auth-Token'))
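# Common pattern in these REST tests: a GET on /userpass with the root
# credentials returns an X-Rucio-Auth-Token header, and that token is then
# passed on every following account/scope request.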
headers2 = {'X-Rucio-Auth-Token': str(token)}
acntusr = account_name_generator()
data = dumps({'type': 'USER', 'email': 'rucio@email.com'})
res2 = TestApp(account_app.wsgifunc(*mw)).post('/' + acntusr, headers=headers2, params=data, expect_errors=True)
assert_equal(res2.status, 201)
headers3 = {'X-Rucio-Auth-Token': str(token)}
scopeusr = scope_name_generator()
res3 = TestApp(account_app.wsgifunc(*mw)).post('/%s/scopes/%s' % (acntusr, scopeusr), headers=headers3, expect_errors=True)
assert_equal(res3.status, 201)
def test_scope_failure(self):
""" SCOPE (REST): send a POST to create a new scope for a not existing account to test the error"""
mw = []
headers1 = {'X-Rucio-Account': 'root', 'X-Rucio-Username': 'ddmlab', 'X-Rucio-Password': 'secret'}
headers1.update(self.vo_header)
res1 = TestApp(auth_app.wsgifunc(*mw)).get('/userpass', headers=headers1, expect_errors=True)
assert_equal(res1.status, 200)
token = str(res1.header('X-Rucio-Auth-Token'))
headers2 = {'X-Rucio-Auth-Token': str(token)}
scopeusr = scope_name_generator()
account_name_generator()
res2 = TestApp(account_app.wsgifunc(*mw)).post('/%s/scopes/%s' % (scopeusr, scopeusr), headers=headers2, expect_errors=True)
assert_equal(res2.status, 404)
def test_scope_duplicate(self):
""" SCOPE (REST): send a POST to create a already existing scope to test the error"""
mw = []
headers1 = {'X-Rucio-Account': 'root', 'X-Rucio-Username': 'ddmlab', 'X-Rucio-Password': 'secret'}
headers1.update(self.vo_header)
res1 = TestApp(auth_app.wsgifunc(*mw)).get('/userpass', headers=headers1, expect_errors=True)
assert_equal(res1.status, 200)
token = str(res1.header('X-Rucio-Auth-Token'))
headers2 = {'X-Rucio-Auth-Token': str(token)}
acntusr = account_name_generator()
data = dumps({'type': 'USER', 'email': 'rucio@email.com'})
res2 = TestApp(account_app.wsgifunc(*mw)).post('/' + acntusr, headers=headers2, params=data, expect_errors=True)
assert_equal(res2.status, 201)
headers3 = {'X-Rucio-Auth-Token': str(token)}
scopeusr = scope_name_generator()
res3 = TestApp(account_app.wsgifunc(*mw)).post('/%s/scopes/%s' % (acntusr, scopeusr), headers=headers3, expect_errors=True)
assert_equal(res3.status, 201)
res3 = TestApp(account_app.wsgifunc(*mw)).post('/%s/scopes/%s' % (acntusr, scopeusr), headers=headers3, expect_errors=True)
assert_equal(res3.status, 409)
def test_list_scope(self):
""" SCOPE (REST): send a GET list all scopes for one account """
mw = []
headers1 = {'X-Rucio-Account': 'root', 'X-Rucio-Username': 'ddmlab', 'X-Rucio-Password': 'secret'}
headers1.update(self.vo_header)
res1 = TestApp(auth_app.wsgifunc(*mw)).get('/userpass', headers=headers1, expect_errors=True)
assert_equal(res1.status, 200)
token = str(res1.header('X-Rucio-Auth-Token'))
tmp_val = account_name_generator()
headers2 = {'Rucio-Type': 'user', 'X-Rucio-Account': 'root', 'X-Rucio-Auth-Token': str(token)}
data = dumps({'type': 'USER', 'email': 'rucio@email.com'})
res2 = TestApp(account_app.wsgifunc(*mw)).post('/%s' % tmp_val, headers=headers2, params=data, expect_errors=True)
assert_equal(res2.status, 201)
headers3 = {'X-Rucio-Auth-Token': str(token)}
for scope in self.scopes:
data = dumps({})
res3 = TestApp(account_app.wsgifunc(*mw)).post('/%s/scopes/%s' % (tmp_val, scope), headers=headers3, params=data, expect_errors=True)
assert_equal(res3.status, 201)
res4 = TestApp(account_app.wsgifunc(*mw)).get('/%s/scopes/' % tmp_val, headers=headers3, expect_errors=True)
assert_equal(res4.status, 200)
svr_list = loads(res4.body)
for scope in self.scopes:
assert_in(scope, svr_list)
def test_list_scope_account_not_found(self):
""" SCOPE (REST): send a GET list all scopes for a not existing account """
mw = []
headers1 = {'X-Rucio-Account': 'root', 'X-Rucio-Username': 'ddmlab', 'X-Rucio-Password': 'secret'}
headers1.update(self.vo_header)
res1 = TestApp(auth_app.wsgifunc(*mw)).get('/userpass', headers=headers1, expect_errors=True)
assert_equal(res1.status, 200)
token = str(res1.header('X-Rucio-Auth-Token'))
headers3 = {'X-Rucio-Auth-Token': str(token)}
res3 = TestApp(account_app.wsgifunc(*mw)).get('/testaccount/scopes', headers=headers3, expect_errors=True)
assert_equal(res3.status, 404)
assert_equal(res3.header('ExceptionClass'), 'AccountNotFound')
def test_list_scope_no_scopes(self):
""" SCOPE (REST): send a GET list all scopes for one account without scopes """
mw = []
headers1 = {'X-Rucio-Account': 'root', 'X-Rucio-Username': 'ddmlab', 'X-Rucio-Password': 'secret'}
headers1.update(self.vo_header)
res1 = TestApp(auth_app.wsgifunc(*mw)).get('/userpass', headers=headers1, expect_errors=True)
assert_equal(res1.status, 200)
token = str(res1.header('X-Rucio-Auth-Token'))
headers2 = {'X-Rucio-Auth-Token': str(token)}
acntusr = account_name_generator()
data = dumps({'type': 'USER', 'email': 'rucio@email.com'})
res2 = TestApp(account_app.wsgifunc(*mw)).post('/' + acntusr, headers=headers2, params=data, expect_errors=True)
assert_equal(res2.status, 201)
headers3 = {'X-Rucio-Auth-Token': str(token)}
res4 = TestApp(account_app.wsgifunc(*mw)).get('/%s/scopes/' % (acntusr), headers=headers3, params=data, expect_errors=True)
assert_equal(res4.status, 404)
assert_equal(res4.header('ExceptionClass'), 'ScopeNotFound')
class TestScopeClient():
def __init__(self):
self.account_client = AccountClient()
self.scope_client = ScopeClient()
def test_create_scope(self):
""" SCOPE (CLIENTS): create a new scope."""
account = 'jdoe'
scope = scope_name_generator()
ret = self.scope_client.add_scope(account, scope)
assert_true(ret)
with assert_raises(InvalidObject):
self.scope_client.add_scope(account, 'tooooolooooongscooooooooooooope')
with assert_raises(InvalidObject):
self.scope_client.add_scope(account, '$?!')
@raises(AccountNotFound)
def test_create_scope_no_account(self):
""" SCOPE (CLIENTS): try to create scope for not existing account."""
account = str(uuid()).lower()[:30]
scope = scope_name_generator()
self.scope_client.add_scope(account, scope)
@raises(Duplicate)
def test_create_scope_duplicate(self):
""" SCOPE (CLIENTS): try to create a duplicate scope."""
account = 'jdoe'
scope = scope_name_generator()
self.scope_client.add_scope(account, scope)
self.scope_client.add_scope(account, scope)
def test_list_scopes(self):
""" SCOPE (CLIENTS): try to list scopes for an account."""
account = 'jdoe'
scope_list = [scope_name_generator() for _ in range(5)]
for scope in scope_list:
self.scope_client.add_scope(account, scope)
svr_list = self.scope_client.list_scopes_for_account(account)
for scope in scope_list:
if scope not in svr_list:
assert_true(False)
@raises(AccountNotFound)
def test_list_scopes_account_not_found(self):
""" SCOPE (CLIENTS): try to list scopes for a non existing account."""
account = account_name_generator()
self.scope_client.list_scopes_for_account(account)
@raises(ScopeNotFound)
def test_list_scopes_no_scopes(self):
""" SCOPE (CLIENTS): try to list scopes for an account without scopes."""
account = account_name_generator()
self.account_client.add_account(account, 'USER', 'rucio@email.com')
self.scope_client.list_scopes_for_account(account)
| 43.547529 | 145 | 0.659391 |
from json import dumps, loads
from paste.fixture import TestApp
from nose.tools import assert_equal, assert_true, assert_in, raises, assert_raises
from rucio.client.accountclient import AccountClient
from rucio.client.scopeclient import ScopeClient
from rucio.common.config import config_get, config_get_bool
from rucio.common.exception import AccountNotFound, Duplicate, ScopeNotFound, InvalidObject
from rucio.common.types import InternalAccount, InternalScope
from rucio.common.utils import generate_uuid as uuid
from rucio.core.scope import get_scopes, add_scope, is_scope_owner
from rucio.tests.common import account_name_generator, scope_name_generator
from rucio.web.rest.account import APP as account_app
from rucio.web.rest.authentication import APP as auth_app
class TestScopeCoreApi():
def __init__(self):
if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
self.vo = {'vo': config_get('client', 'vo', raise_exception=False, default='tst')}
else:
self.vo = {}
self.scopes = [InternalScope(scope_name_generator(), **self.vo) for _ in range(5)]
self.jdoe = InternalAccount('jdoe', **self.vo)
def test_list_scopes(self):
for scope in self.scopes:
add_scope(scope=scope, account=self.jdoe)
scopes = get_scopes(account=self.jdoe)
for scope in scopes:
assert_in(scope, scopes)
def test_is_scope_owner(self):
scope = InternalScope(scope_name_generator(), **self.vo)
add_scope(scope=scope, account=self.jdoe)
answer = is_scope_owner(scope=scope, account=self.jdoe)
assert_equal(answer, True)
class TestScope():
def __init__(self):
if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
self.vo_header = {'X-Rucio-VO': config_get('client', 'vo', raise_exception=False, default='tst')}
else:
self.vo_header = {}
self.scopes = [scope_name_generator() for _ in range(5)]
def test_scope_success(self):
mw = []
headers1 = {'X-Rucio-Account': 'root', 'X-Rucio-Username': 'ddmlab', 'X-Rucio-Password': 'secret'}
headers1.update(self.vo_header)
res1 = TestApp(auth_app.wsgifunc(*mw)).get('/userpass', headers=headers1, expect_errors=True)
assert_equal(res1.status, 200)
token = str(res1.header('X-Rucio-Auth-Token'))
headers2 = {'X-Rucio-Auth-Token': str(token)}
acntusr = account_name_generator()
data = dumps({'type': 'USER', 'email': 'rucio@email.com'})
res2 = TestApp(account_app.wsgifunc(*mw)).post('/' + acntusr, headers=headers2, params=data, expect_errors=True)
assert_equal(res2.status, 201)
headers3 = {'X-Rucio-Auth-Token': str(token)}
scopeusr = scope_name_generator()
res3 = TestApp(account_app.wsgifunc(*mw)).post('/%s/scopes/%s' % (acntusr, scopeusr), headers=headers3, expect_errors=True)
assert_equal(res3.status, 201)
def test_scope_failure(self):
mw = []
headers1 = {'X-Rucio-Account': 'root', 'X-Rucio-Username': 'ddmlab', 'X-Rucio-Password': 'secret'}
headers1.update(self.vo_header)
res1 = TestApp(auth_app.wsgifunc(*mw)).get('/userpass', headers=headers1, expect_errors=True)
assert_equal(res1.status, 200)
token = str(res1.header('X-Rucio-Auth-Token'))
headers2 = {'X-Rucio-Auth-Token': str(token)}
scopeusr = scope_name_generator()
account_name_generator()
res2 = TestApp(account_app.wsgifunc(*mw)).post('/%s/scopes/%s' % (scopeusr, scopeusr), headers=headers2, expect_errors=True)
assert_equal(res2.status, 404)
def test_scope_duplicate(self):
mw = []
headers1 = {'X-Rucio-Account': 'root', 'X-Rucio-Username': 'ddmlab', 'X-Rucio-Password': 'secret'}
headers1.update(self.vo_header)
res1 = TestApp(auth_app.wsgifunc(*mw)).get('/userpass', headers=headers1, expect_errors=True)
assert_equal(res1.status, 200)
token = str(res1.header('X-Rucio-Auth-Token'))
headers2 = {'X-Rucio-Auth-Token': str(token)}
acntusr = account_name_generator()
data = dumps({'type': 'USER', 'email': 'rucio@email.com'})
res2 = TestApp(account_app.wsgifunc(*mw)).post('/' + acntusr, headers=headers2, params=data, expect_errors=True)
assert_equal(res2.status, 201)
headers3 = {'X-Rucio-Auth-Token': str(token)}
scopeusr = scope_name_generator()
res3 = TestApp(account_app.wsgifunc(*mw)).post('/%s/scopes/%s' % (acntusr, scopeusr), headers=headers3, expect_errors=True)
assert_equal(res3.status, 201)
res3 = TestApp(account_app.wsgifunc(*mw)).post('/%s/scopes/%s' % (acntusr, scopeusr), headers=headers3, expect_errors=True)
assert_equal(res3.status, 409)
def test_list_scope(self):
mw = []
headers1 = {'X-Rucio-Account': 'root', 'X-Rucio-Username': 'ddmlab', 'X-Rucio-Password': 'secret'}
headers1.update(self.vo_header)
res1 = TestApp(auth_app.wsgifunc(*mw)).get('/userpass', headers=headers1, expect_errors=True)
assert_equal(res1.status, 200)
token = str(res1.header('X-Rucio-Auth-Token'))
tmp_val = account_name_generator()
headers2 = {'Rucio-Type': 'user', 'X-Rucio-Account': 'root', 'X-Rucio-Auth-Token': str(token)}
data = dumps({'type': 'USER', 'email': 'rucio@email.com'})
res2 = TestApp(account_app.wsgifunc(*mw)).post('/%s' % tmp_val, headers=headers2, params=data, expect_errors=True)
assert_equal(res2.status, 201)
headers3 = {'X-Rucio-Auth-Token': str(token)}
for scope in self.scopes:
data = dumps({})
res3 = TestApp(account_app.wsgifunc(*mw)).post('/%s/scopes/%s' % (tmp_val, scope), headers=headers3, params=data, expect_errors=True)
assert_equal(res3.status, 201)
res4 = TestApp(account_app.wsgifunc(*mw)).get('/%s/scopes/' % tmp_val, headers=headers3, expect_errors=True)
assert_equal(res4.status, 200)
svr_list = loads(res4.body)
for scope in self.scopes:
assert_in(scope, svr_list)
def test_list_scope_account_not_found(self):
mw = []
headers1 = {'X-Rucio-Account': 'root', 'X-Rucio-Username': 'ddmlab', 'X-Rucio-Password': 'secret'}
headers1.update(self.vo_header)
res1 = TestApp(auth_app.wsgifunc(*mw)).get('/userpass', headers=headers1, expect_errors=True)
assert_equal(res1.status, 200)
token = str(res1.header('X-Rucio-Auth-Token'))
headers3 = {'X-Rucio-Auth-Token': str(token)}
res3 = TestApp(account_app.wsgifunc(*mw)).get('/testaccount/scopes', headers=headers3, expect_errors=True)
assert_equal(res3.status, 404)
assert_equal(res3.header('ExceptionClass'), 'AccountNotFound')
def test_list_scope_no_scopes(self):
mw = []
headers1 = {'X-Rucio-Account': 'root', 'X-Rucio-Username': 'ddmlab', 'X-Rucio-Password': 'secret'}
headers1.update(self.vo_header)
res1 = TestApp(auth_app.wsgifunc(*mw)).get('/userpass', headers=headers1, expect_errors=True)
assert_equal(res1.status, 200)
token = str(res1.header('X-Rucio-Auth-Token'))
headers2 = {'X-Rucio-Auth-Token': str(token)}
acntusr = account_name_generator()
data = dumps({'type': 'USER', 'email': 'rucio@email.com'})
res2 = TestApp(account_app.wsgifunc(*mw)).post('/' + acntusr, headers=headers2, params=data, expect_errors=True)
assert_equal(res2.status, 201)
headers3 = {'X-Rucio-Auth-Token': str(token)}
res4 = TestApp(account_app.wsgifunc(*mw)).get('/%s/scopes/' % (acntusr), headers=headers3, params=data, expect_errors=True)
assert_equal(res4.status, 404)
assert_equal(res4.header('ExceptionClass'), 'ScopeNotFound')
class TestScopeClient():
def __init__(self):
self.account_client = AccountClient()
self.scope_client = ScopeClient()
def test_create_scope(self):
account = 'jdoe'
scope = scope_name_generator()
ret = self.scope_client.add_scope(account, scope)
assert_true(ret)
with assert_raises(InvalidObject):
self.scope_client.add_scope(account, 'tooooolooooongscooooooooooooope')
with assert_raises(InvalidObject):
self.scope_client.add_scope(account, '$?!')
@raises(AccountNotFound)
def test_create_scope_no_account(self):
account = str(uuid()).lower()[:30]
scope = scope_name_generator()
self.scope_client.add_scope(account, scope)
@raises(Duplicate)
def test_create_scope_duplicate(self):
account = 'jdoe'
scope = scope_name_generator()
self.scope_client.add_scope(account, scope)
self.scope_client.add_scope(account, scope)
def test_list_scopes(self):
account = 'jdoe'
scope_list = [scope_name_generator() for _ in range(5)]
for scope in scope_list:
self.scope_client.add_scope(account, scope)
svr_list = self.scope_client.list_scopes_for_account(account)
for scope in scope_list:
if scope not in svr_list:
assert_true(False)
@raises(AccountNotFound)
def test_list_scopes_account_not_found(self):
account = account_name_generator()
self.scope_client.list_scopes_for_account(account)
@raises(ScopeNotFound)
def test_list_scopes_no_scopes(self):
account = account_name_generator()
self.account_client.add_account(account, 'USER', 'rucio@email.com')
self.scope_client.list_scopes_for_account(account)
| true | true |
f71f18791975016707e6e7b272eed9733353c44e | 14,409 | py | Python | examples/pytorch/eager/image_recognition/cifar100/main.py | kevinintel/neural-compressor | b57645566aeff8d3c18dc49d2739a583c072f940 | [
"Apache-2.0"
] | 172 | 2021-09-14T18:34:17.000Z | 2022-03-30T06:49:53.000Z | examples/pytorch/eager/image_recognition/cifar100/main.py | kevinintel/neural-compressor | b57645566aeff8d3c18dc49d2739a583c072f940 | [
"Apache-2.0"
] | 40 | 2021-09-14T02:26:12.000Z | 2022-03-29T08:34:04.000Z | examples/pytorch/eager/image_recognition/cifar100/main.py | kevinintel/neural-compressor | b57645566aeff8d3c18dc49d2739a583c072f940 | [
"Apache-2.0"
] | 33 | 2021-09-15T07:27:25.000Z | 2022-03-25T08:30:57.000Z | import os
import time
import shutil
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models.vgg as vgg
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from plain_cnn_cifar import ConvNetMaker, plane_cifar100_book
# used for logging to TensorBoard
from tensorboard_logger import configure, log_value
parser = argparse.ArgumentParser(description='PyTorch CNN or VGG Training')
parser.add_argument('--dataset', default='cifar100', type=str,
help='dataset cifar100')
parser.add_argument('--epochs', default=200, type=int,
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int,
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=128, type=int,
help='mini-batch size (default: 128)')
parser.add_argument('--lr', '--learning-rate', default=0.02, type=float,
help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--nesterov', default=True, type=bool, help='nesterov momentum')
parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,
help='weight decay (default: 5e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
help='print frequency (default: 10)')
parser.add_argument('--droprate', default=0, type=float,
help='dropout probability (default: 0.0)')
parser.add_argument('--no-augment', dest='augment', action='store_false',
help='whether to use standard augmentation (default: True)')
parser.add_argument('--resume', default='', type=str,
help='path to latest checkpoint (default: none)')
parser.add_argument('--name', default='CNN-2', type=str,
help='name of experiment')
parser.add_argument('--student_type', default='CNN-2', type=str,
help='type of student model (CNN-2 [default] or VGG-8)')
parser.add_argument('--teacher_type', default='CNN-10', type=str,
help='type of teacher model (CNN-10 [default] or VGG-13)')
parser.add_argument('--teacher_model', default='runs/CNN-10/model_best.pth.tar', type=str,
help='path of teacher model')
parser.add_argument('--tensorboard',
help='Log progress to TensorBoard', action='store_true')
parser.add_argument("--seed", type=int, default=5143, help="A seed for reproducible training.")
parser.add_argument("--config", default='distillation.yaml', help="pruning config")
parser.add_argument("--temperature", default=1, type=float,
help='temperature parameter of distillation')
parser.add_argument("--loss_types", default=['CE', 'KL'], type=str, nargs='+',
help='loss types of distillation, should be a list of length 2, '
'first for student targets loss, second for teacher student loss.')
parser.add_argument("--loss_weights", default=[0.5, 0.5], type=float, nargs='+',
help='loss weights of distillation, should be a list of length 2, '
'and sum to 1.0, first for student targets loss weight, '
'second for teacher student loss weight.')
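# Rough sketch of how the two weights are combined (the exact form is up to the
# configured criterion, PyTorchKnowledgeDistillationLoss below):
#   total_loss = w_ce * CE(student_logits, labels)
#              + w_kd * KL(softmax(teacher_logits / T) || softmax(student_logits / T))
# with T being the --temperature argument and (w_ce, w_kd) the --loss_weights.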
parser.set_defaults(augment=True)
def set_seed(seed):
import random
import numpy as np
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def main():
global args, best_prec1
args, _ = parser.parse_known_args()
best_prec1 = 0
if args.seed is not None:
set_seed(args.seed)
if args.tensorboard: configure("runs/%s"%(args.name))
# Data loading code
normalize = transforms.Normalize(mean=[0.5071, 0.4866, 0.4409], std=[0.2675, 0.2565, 0.2761])
if args.augment:
transform_train = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: F.pad(x.unsqueeze(0),
(4,4,4,4),mode='reflect').squeeze()),
transforms.ToPILImage(),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
else:
transform_train = transforms.Compose([
transforms.ToTensor(),
normalize,
])
transform_test = transforms.Compose([
transforms.ToTensor(),
normalize
])
# create teacher and student model
if args.teacher_type == 'CNN-10':
teacher_model = ConvNetMaker(plane_cifar100_book['10'])
elif args.teacher_type == 'VGG-13':
teacher_model = vgg.vgg13(num_classes=100)
else:
raise NotImplementedError('Unsupported teacher model type')
teacher_model.load_state_dict(torch.load(args.teacher_model)['state_dict'])
if args.student_type == 'CNN-2':
student_model = ConvNetMaker(plane_cifar100_book['2'])
elif args.student_type == 'VGG-8':
student_model = vgg.VGG(vgg.make_layers([64, 'M', 128, 'M', 256, 'M', 512, 'M', 512, 'M']), num_classes=100)
else:
raise NotImplementedError('Unsupported student model type')
# get the number of model parameters
print('Number of teacher model parameters: {}'.format(
sum([p.data.nelement() for p in teacher_model.parameters()])))
print('Number of student model parameters: {}'.format(
sum([p.data.nelement() for p in student_model.parameters()])))
kwargs = {'num_workers': 0, 'pin_memory': True}
assert(args.dataset == 'cifar100')
train_dataset = datasets.__dict__[args.dataset.upper()]('../data',
train=True, download=True,
transform=transform_train)
# get logits of teacher model
if args.loss_weights[1] > 0:
from tqdm import tqdm
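# Teacher logits are computed once over the whole training set and cached to
# teacher_logits.npy next to the teacher checkpoint; later runs load the file
# instead of repeating the teacher forward passes.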
def get_logits(teacher_model, train_dataset):
print("***** Getting logits of teacher model *****")
print(f" Num examples = {len(train_dataset) }")
logits_file = os.path.join(os.path.dirname(args.teacher_model), 'teacher_logits.npy')
if not os.path.exists(logits_file):
teacher_model.eval()
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, **kwargs)
train_dataloader = tqdm(train_dataloader, desc="Evaluating")
teacher_logits = []
for step, (input, target) in enumerate(train_dataloader):
outputs = teacher_model(input)
teacher_logits += [x for x in outputs.numpy()]
np.save(logits_file, np.array(teacher_logits))
else:
teacher_logits = np.load(logits_file)
train_dataset.targets = [{'labels':l, 'teacher_logits':tl} \
for l, tl in zip(train_dataset.targets, teacher_logits)]
return train_dataset
with torch.no_grad():
train_dataset = get_logits(teacher_model, train_dataset)
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = torch.utils.data.DataLoader(
datasets.__dict__[args.dataset.upper()]('../data', train=False, transform=transform_test),
batch_size=args.batch_size, shuffle=True, **kwargs)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
student_model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
# define optimizer
optimizer = torch.optim.SGD(student_model.parameters(), args.lr,
momentum=args.momentum, nesterov = args.nesterov,
weight_decay=args.weight_decay)
# cosine learning rate
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, len(train_loader)*args.epochs)
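# The scheduler is stepped once per batch inside train(), so T_max is the total
# number of optimizer steps: len(train_loader) * args.epochs.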
def train_func(model):
return train(train_loader, model, scheduler, distiller, best_prec1)
def eval_func(model):
return validate(val_loader, model, distiller)
from neural_compressor.experimental import Distillation, common
from neural_compressor.experimental.common.criterion import PyTorchKnowledgeDistillationLoss
distiller = Distillation(args.config)
distiller.teacher_model = common.Model(teacher_model)
distiller.student_model = common.Model(student_model)
distiller.train_func = train_func
distiller.eval_func = eval_func
distiller.optimizer = optimizer
distiller.criterion = PyTorchKnowledgeDistillationLoss(
temperature=args.temperature,
loss_types=args.loss_types,
loss_weights=args.loss_weights)
model = distiller()
directory = "runs/%s/"%(args.name)
os.makedirs(directory, exist_ok=True)
model.save(directory)
# change to framework model for further use
model = model.model
def train(train_loader, model, scheduler, distiller, best_prec1):
distiller.pre_epoch_begin()
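# Distillation hooks used by this loop: pre_epoch_begin() once before training,
# on_post_forward(input, teacher_logits) after every student forward pass and
# before the criterion is evaluated, and on_epoch_end() after each epoch;
# distiller.best_score is read below to decide which checkpoint is best.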
for epoch in range(args.start_epoch, args.epochs):
"""Train for one epoch on the training set"""
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
teacher_logits = None
if isinstance(target, dict):
teacher_logits = target['teacher_logits']
target = target['labels']
# compute output
output = model(input)
distiller.on_post_forward(input, teacher_logits)
loss = distiller.criterion(output, target)
# measure accuracy and record loss
prec1 = accuracy(output.data, target, topk=(1,))[0]
losses.update(loss.data.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
# compute gradient and do SGD step
distiller.optimizer.zero_grad()
loss.backward()
distiller.optimizer.step()
scheduler.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'LR {scheduler._last_lr[0]:.6f}'.format(
epoch, i, len(train_loader), batch_time=batch_time,
loss=losses, top1=top1, scheduler=scheduler))
distiller.on_epoch_end()
# remember best prec@1 and save checkpoint
is_best = distiller.best_score > best_prec1
best_prec1 = max(distiller.best_score, best_prec1)
save_checkpoint({
'epoch': distiller._epoch_runned + 1,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best)
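# Each checkpoint stores the epoch counter, the student weights and the best
# top-1 accuracy so far; save_checkpoint() also copies it to model_best.pth.tar
# whenever is_best is True.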
# log to TensorBoard
if args.tensorboard:
log_value('train_loss', losses.avg, epoch)
log_value('train_acc', top1.avg, epoch)
log_value('learning_rate', scheduler._last_lr[0], epoch)
def validate(val_loader, model, distiller):
"""Perform validation on the validation set"""
batch_time = AverageMeter()
top1 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
# compute output
with torch.no_grad():
output = model(input)
# measure accuracy
prec1 = accuracy(output.data, target, topk=(1,))[0]
top1.update(prec1.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time,
top1=top1))
print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
# log to TensorBoard
if args.tensorboard:
log_value('val_acc', top1.avg, distiller._epoch_runned)
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
"""Saves checkpoint to disk"""
directory = "runs/%s/"%(args.name)
if not os.path.exists(directory):
os.makedirs(directory)
filename = directory + filename
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'runs/%s/'%(args.name) + 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
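# The running sum is weighted by the batch size n, so .avg is the exact mean
# over all samples seen so far rather than a mean of per-batch means.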
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
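# Illustrative example (made-up values): with topk=(1,), batch_size=2,
# output=[[0.1, 0.9], [0.8, 0.2]] and target=[1, 0], both top-1 predictions
# match, so the returned list holds a single tensor equal to 100.0.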
if __name__ == '__main__':
main() | 41.051282 | 116 | 0.613575 | import os
import time
import shutil
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models.vgg as vgg
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from plain_cnn_cifar import ConvNetMaker, plane_cifar100_book
from tensorboard_logger import configure, log_value
parser = argparse.ArgumentParser(description='PyTorch CNN or VGG Training')
parser.add_argument('--dataset', default='cifar100', type=str,
help='dataset cifar100')
parser.add_argument('--epochs', default=200, type=int,
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int,
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=128, type=int,
help='mini-batch size (default: 128)')
parser.add_argument('--lr', '--learning-rate', default=0.02, type=float,
help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--nesterov', default=True, type=bool, help='nesterov momentum')
parser.add_argument('--weight-decay', '--wd', default=5e-4, type=float,
help='weight decay (default: 5e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
help='print frequency (default: 10)')
parser.add_argument('--droprate', default=0, type=float,
help='dropout probability (default: 0.0)')
parser.add_argument('--no-augment', dest='augment', action='store_false',
help='whether to use standard augmentation (default: True)')
parser.add_argument('--resume', default='', type=str,
help='path to latest checkpoint (default: none)')
parser.add_argument('--name', default='CNN-2', type=str,
help='name of experiment')
parser.add_argument('--student_type', default='CNN-2', type=str,
help='type of student model (CNN-2 [default] or VGG-8)')
parser.add_argument('--teacher_type', default='CNN-10', type=str,
help='type of teacher model (CNN-10 [default] or VGG-13)')
parser.add_argument('--teacher_model', default='runs/CNN-10/model_best.pth.tar', type=str,
help='path of teacher model')
parser.add_argument('--tensorboard',
help='Log progress to TensorBoard', action='store_true')
parser.add_argument("--seed", type=int, default=5143, help="A seed for reproducible training.")
parser.add_argument("--config", default='distillation.yaml', help="pruning config")
parser.add_argument("--temperature", default=1, type=float,
help='temperature parameter of distillation')
parser.add_argument("--loss_types", default=['CE', 'KL'], type=str, nargs='+',
help='loss types of distillation, should be a list of length 2, '
'first for student targets loss, second for teacher student loss.')
parser.add_argument("--loss_weights", default=[0.5, 0.5], type=float, nargs='+',
help='loss weights of distillation, should be a list of length 2, '
'and sum to 1.0, first for student targets loss weight, '
'second for teacher student loss weight.')
parser.set_defaults(augment=True)
def set_seed(seed):
import random
import numpy as np
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
def main():
global args, best_prec1
args, _ = parser.parse_known_args()
best_prec1 = 0
if args.seed is not None:
set_seed(args.seed)
if args.tensorboard: configure("runs/%s"%(args.name))
normalize = transforms.Normalize(mean=[0.5071, 0.4866, 0.4409], std=[0.2675, 0.2565, 0.2761])
if args.augment:
transform_train = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: F.pad(x.unsqueeze(0),
(4,4,4,4),mode='reflect').squeeze()),
transforms.ToPILImage(),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
else:
transform_train = transforms.Compose([
transforms.ToTensor(),
normalize,
])
transform_test = transforms.Compose([
transforms.ToTensor(),
normalize
])
if args.teacher_type == 'CNN-10':
teacher_model = ConvNetMaker(plane_cifar100_book['10'])
elif args.teacher_type == 'VGG-13':
teacher_model = vgg.vgg13(num_classes=100)
else:
raise NotImplementedError('Unsupported teacher model type')
teacher_model.load_state_dict(torch.load(args.teacher_model)['state_dict'])
if args.student_type == 'CNN-2':
student_model = ConvNetMaker(plane_cifar100_book['2'])
elif args.student_type == 'VGG-8':
student_model = vgg.VGG(vgg.make_layers([64, 'M', 128, 'M', 256, 'M', 512, 'M', 512, 'M']), num_classes=100)
else:
raise NotImplementedError('Unsupported student model type')
print('Number of teacher model parameters: {}'.format(
sum([p.data.nelement() for p in teacher_model.parameters()])))
print('Number of student model parameters: {}'.format(
sum([p.data.nelement() for p in student_model.parameters()])))
kwargs = {'num_workers': 0, 'pin_memory': True}
assert(args.dataset == 'cifar100')
train_dataset = datasets.__dict__[args.dataset.upper()]('../data',
train=True, download=True,
transform=transform_train)
if args.loss_weights[1] > 0:
from tqdm import tqdm
def get_logits(teacher_model, train_dataset):
print("***** Getting logits of teacher model *****")
print(f" Num examples = {len(train_dataset) }")
logits_file = os.path.join(os.path.dirname(args.teacher_model), 'teacher_logits.npy')
if not os.path.exists(logits_file):
teacher_model.eval()
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, **kwargs)
train_dataloader = tqdm(train_dataloader, desc="Evaluating")
teacher_logits = []
for step, (input, target) in enumerate(train_dataloader):
outputs = teacher_model(input)
teacher_logits += [x for x in outputs.numpy()]
np.save(logits_file, np.array(teacher_logits))
else:
teacher_logits = np.load(logits_file)
train_dataset.targets = [{'labels':l, 'teacher_logits':tl} \
for l, tl in zip(train_dataset.targets, teacher_logits)]
return train_dataset
with torch.no_grad():
train_dataset = get_logits(teacher_model, train_dataset)
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = torch.utils.data.DataLoader(
datasets.__dict__[args.dataset.upper()]('../data', train=False, transform=transform_test),
batch_size=args.batch_size, shuffle=True, **kwargs)
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
student_model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
optimizer = torch.optim.SGD(student_model.parameters(), args.lr,
momentum=args.momentum, nesterov = args.nesterov,
weight_decay=args.weight_decay)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, len(train_loader)*args.epochs)
def train_func(model):
return train(train_loader, model, scheduler, distiller, best_prec1)
def eval_func(model):
return validate(val_loader, model, distiller)
from neural_compressor.experimental import Distillation, common
from neural_compressor.experimental.common.criterion import PyTorchKnowledgeDistillationLoss
distiller = Distillation(args.config)
distiller.teacher_model = common.Model(teacher_model)
distiller.student_model = common.Model(student_model)
distiller.train_func = train_func
distiller.eval_func = eval_func
distiller.optimizer = optimizer
distiller.criterion = PyTorchKnowledgeDistillationLoss(
temperature=args.temperature,
loss_types=args.loss_types,
loss_weights=args.loss_weights)
model = distiller()
directory = "runs/%s/"%(args.name)
os.makedirs(directory, exist_ok=True)
model.save(directory)
model = model.model
def train(train_loader, model, scheduler, distiller, best_prec1):
distiller.pre_epoch_begin()
for epoch in range(args.start_epoch, args.epochs):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
teacher_logits = None
if isinstance(target, dict):
teacher_logits = target['teacher_logits']
target = target['labels']
output = model(input)
distiller.on_post_forward(input, teacher_logits)
loss = distiller.criterion(output, target)
prec1 = accuracy(output.data, target, topk=(1,))[0]
losses.update(loss.data.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
distiller.optimizer.zero_grad()
loss.backward()
distiller.optimizer.step()
scheduler.step()
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'LR {scheduler._last_lr[0]:.6f}'.format(
epoch, i, len(train_loader), batch_time=batch_time,
loss=losses, top1=top1, scheduler=scheduler))
distiller.on_epoch_end()
is_best = distiller.best_score > best_prec1
best_prec1 = max(distiller.best_score, best_prec1)
save_checkpoint({
'epoch': distiller._epoch_runned + 1,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best)
if args.tensorboard:
log_value('train_loss', losses.avg, epoch)
log_value('train_acc', top1.avg, epoch)
log_value('learning_rate', scheduler._last_lr[0], epoch)
def validate(val_loader, model, distiller):
batch_time = AverageMeter()
top1 = AverageMeter()
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
with torch.no_grad():
output = model(input)
prec1 = accuracy(output.data, target, topk=(1,))[0]
top1.update(prec1.item(), input.size(0))
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time,
top1=top1))
print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
if args.tensorboard:
log_value('val_acc', top1.avg, distiller._epoch_runned)
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
directory = "runs/%s/"%(args.name)
if not os.path.exists(directory):
os.makedirs(directory)
filename = directory + filename
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'runs/%s/'%(args.name) + 'model_best.pth.tar')
class AverageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main() | true | true |
f71f18898c8292f215084d67a0492fc48f5a9d6c | 8,974 | py | Python | main.py | PabloEmidio/Know-Weather-GTK | 797f25cbd0c8e1a2f124a5328d9decf2f3829252 | [
"MIT"
] | 4 | 2021-05-06T02:07:02.000Z | 2021-05-06T17:48:08.000Z | main.py | PabloEmidio/Know-Weather-GTK | 797f25cbd0c8e1a2f124a5328d9decf2f3829252 | [
"MIT"
] | null | null | null | main.py | PabloEmidio/Know-Weather-GTK | 797f25cbd0c8e1a2f124a5328d9decf2f3829252 | [
"MIT"
] | null | null | null | import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from datetime import datetime
from api_request import Weather
builder = Gtk.Builder()
builder.add_from_file('./glade/main.glade')
class Handler:
def __init__(self, *args, **kwargs):
super(Handler, self).__init__(*args, **kwargs)
self.weather_instance = Weather()
self.entry = builder.get_object('entry')
self.btn_search = builder.get_object('btn_search')
self.city_name = builder.get_object('city_name')
self.city_text = builder.get_object('city_text')
self.main_temp = builder.get_object('main_temp')
self.which_temp_simbol_is = 'Celsius'
self.weekday_name = builder.get_object('weekday_name')
self.weekday_name_today = builder.get_object('weekday_name_today')
self.temp_today_max = builder.get_object('today_max')
self.temp_today_min = builder.get_object('today_min')
self.hour_1_now = builder.get_object('hour_1_now')
self.hour_1_chance_of_rain = builder.get_object('hour_1_chance_of_rain')
self.hour_1_icon = builder.get_object('hour_1_icon')
self.hour_1_temp = builder.get_object('hour_1_temp')
self.hour_2_clock = builder.get_object('hour_2_clock')
self.hour_2_chance_of_rain = builder.get_object('hour_2_chance_of_rain')
self.hour_2_icon = builder.get_object('hour_2_icon')
self.hour_2_temp = builder.get_object('hour_2_temp')
self.hour_3_clock = builder.get_object('hour_3_clock')
self.hour_3_chance_of_rain = builder.get_object('hour_3_chance_of_rain')
self.hour_3_icon = builder.get_object('hour_3_icon')
self.hour_3_temp = builder.get_object('hour_3_temp')
self.hour_4_clock = builder.get_object('hour_4_clock')
self.hour_4_chance_of_rain = builder.get_object('hour_4_chance_of_rain')
self.hour_4_icon = builder.get_object('hour_4_icon')
self.hour_4_temp = builder.get_object('hour_4_temp')
self.hour_5_clock = builder.get_object('hour_5_clock')
self.hour_5_chance_of_rain = builder.get_object('hour_5_chance_of_rain')
self.hour_5_icon = builder.get_object('hour_5_icon')
self.hour_5_temp = builder.get_object('hour_5_temp')
self.day_1_name = builder.get_object('day_1_name')
self.day_1_icon = builder.get_object('day_1_icon')
self.day_1_temp_max = builder.get_object('day_1_temp_max')
self.day_1_temp_min = builder.get_object('day_1_temp_min')
self.day_2_name = builder.get_object('day_2_name')
self.day_2_icon = builder.get_object('day_2_icon')
self.day_2_temp_max = builder.get_object('day_2_temp_max')
self.day_2_temp_min = builder.get_object('day_2_temp_min')
def onDestroy(self, *args):
Gtk.main_quit()
def on_button_search_clicked(self, widget):
# now.strftime('%A') returns the weekday name (e.g. 'Monday')
import re, unicodedata
word = unicodedata.normalize('NFD', self.entry.get_text())
word = re.sub('[\u0300-\u036f]', '', word)
try:
now = datetime.now()
current_hour = int(now.strftime('%H'))
current_search = self.weather_instance.get_weather_info(word, current_hour=current_hour)
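# Response fields used below (shape inferred from this code, not a full API
# schema): location.name / location.region, current.temp_c,
# current.condition.text, and forecast.forecastday[n] with day.maxtemp_c,
# day.mintemp_c, hour[h].temp_c and hour[h].chance_of_rain.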
self.city_name.set_text(current_search['location']['name'] + '/' + current_search['location']['region'])
self.city_text.set_text(current_search['current']['condition']['text'])
self.main_temp.set_text(str(int(current_search['current']['temp_c'])) + '°')
weekday = now.strftime('%A')
self.weekday_name.set_text(weekday)
self.weekday_name_today.set_text('Today')
today_max_temp = str(int(current_search['forecast']['forecastday'][0]['day']['maxtemp_c']))
today_min_temp = str(int(current_search['forecast']['forecastday'][0]['day']['mintemp_c']))
self.temp_today_max.set_text(today_max_temp)
self.temp_today_min.set_text(today_min_temp)
### Hours informations ######################################################
def is_available(increase: int) -> bool:
return not (current_hour + increase > 23)
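# Each hourly slot below is only filled while current_hour + offset is still
# inside today's 24 forecast hours; otherwise the slot shows
# 'unavailable' / 'tomorrow' instead of indexing past the end of the hour list.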
if is_available(0):
self.hour_1_now.set_text('Now')
if int(chance_of_rain := current_search['forecast']['forecastday'][0]['hour'][current_hour]['chance_of_rain'])>0:
self.hour_1_chance_of_rain.set_text(str(chance_of_rain) + '%')
self.hour_1_temp.set_text(str(int(current_search['forecast']['forecastday'][0]['hour'][current_hour]['temp_c'])))
else:
self.hour_1_now.set_text('unavailable')
self.hour_1_temp.set_text('tomorrow')
self.hour_1_icon.set_from_file('./images/hour_icon/1.png')
if is_available(1):
self.hour_2_clock.set_text(str(int(now.strftime('%I'))+1) + now.strftime('%p'))
if int(chance_of_rain := current_search['forecast']['forecastday'][0]['hour'][current_hour+1]['chance_of_rain'])>0:
self.hour_2_chance_of_rain.set_text(str(chance_of_rain) + '%')
self.hour_2_temp.set_text(str(int(current_search['forecast']['forecastday'][0]['hour'][current_hour+1]['temp_c'])))
else:
self.hour_2_clock.set_text('unavailable')
self.hour_2_temp.set_text('tomorrow')
self.hour_2_icon.set_from_file('./images/hour_icon/2.png')
if is_available(2):
self.hour_3_clock.set_text(str(int(now.strftime('%I'))+2) + now.strftime('%p'))
if int(chance_of_rain := current_search['forecast']['forecastday'][0]['hour'][current_hour+2]['chance_of_rain'])>0:
self.hour_3_chance_of_rain.set_text(str(chance_of_rain) + '%')
self.hour_3_temp.set_text(str(int(current_search['forecast']['forecastday'][0]['hour'][current_hour+2]['temp_c'])))
else:
self.hour_3_clock.set_text('unavailable')
self.hour_3_temp.set_text('tomorrow')
self.hour_3_icon.set_from_file('./images/hour_icon/3.png')
if is_available(3):
self.hour_4_clock.set_text(str(int(now.strftime('%I'))+3) + now.strftime('%p'))
if int(chance_of_rain := current_search['forecast']['forecastday'][0]['hour'][current_hour+3]['chance_of_rain'])>0:
self.hour_4_chance_of_rain.set_text(str(chance_of_rain) + '%')
self.hour_4_temp.set_text(str(int(current_search['forecast']['forecastday'][0]['hour'][current_hour+3]['temp_c'])))
else:
self.hour_4_clock.set_text('unavailable')
self.hour_4_temp.set_text('tomorrow')
self.hour_4_icon.set_from_file('./images/hour_icon/4.png')
if is_available(4):
self.hour_5_clock.set_text(str(int(now.strftime('%I'))+4) + now.strftime('%p'))
if int(chance_of_rain := current_search['forecast']['forecastday'][0]['hour'][current_hour+4]['chance_of_rain'])>0:
self.hour_5_chance_of_rain.set_text(str(chance_of_rain) + '%')
self.hour_5_temp.set_text(str(int(current_search['forecast']['forecastday'][0]['hour'][current_hour+4]['temp_c'])))
else:
self.hour_5_clock.set_text('unavailable')
self.hour_5_temp.set_text('tomorrow')
self.hour_5_icon.set_from_file('./images/hour_icon/5.png')
### days informations ######################################################
self.day_1_name.set_text(datetime.fromisoformat(current_search['forecast']['forecastday'][1]['date']).strftime('%A'))
self.day_1_icon.set_from_file('./images/days_icon/1.png')
self.day_1_temp_max.set_text(str(int(current_search['forecast']['forecastday'][1]['day']['maxtemp_c'])))
self.day_1_temp_min.set_text(str(int(current_search['forecast']['forecastday'][1]['day']['mintemp_c'])))
self.day_2_name.set_text(datetime.fromisoformat(current_search['forecast']['forecastday'][2]['date']).strftime('%A'))
self.day_2_icon.set_from_file('./images/days_icon/2.png')
self.day_2_temp_max.set_text(str(int(current_search['forecast']['forecastday'][2]['day']['maxtemp_c'])))
self.day_2_temp_min.set_text(str(int(current_search['forecast']['forecastday'][2]['day']['mintemp_c'])))
except Exception as error:
print(f'error {error}')
builder.connect_signals(Handler())
window = builder.get_object('window')
window.show_all()
Gtk.main() | 53.736527 | 131 | 0.622131 | import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from datetime import datetime
from api_request import Weather
builder = Gtk.Builder()
builder.add_from_file('./glade/main.glade')
class Handler:
def __init__(self, *args, **kwargs):
super(Handler, self).__init__(*args, **kwargs)
self.weather_instance = Weather()
self.entry = builder.get_object('entry')
self.btn_search = builder.get_object('btn_search')
self.city_name = builder.get_object('city_name')
self.city_text = builder.get_object('city_text')
self.main_temp = builder.get_object('main_temp')
self.which_temp_simbol_is = 'Celsius'
self.weekday_name = builder.get_object('weekday_name')
self.weekday_name_today = builder.get_object('weekday_name_today')
self.temp_today_max = builder.get_object('today_max')
self.temp_today_min = builder.get_object('today_min')
self.hour_1_now = builder.get_object('hour_1_now')
self.hour_1_chance_of_rain = builder.get_object('hour_1_chance_of_rain')
self.hour_1_icon = builder.get_object('hour_1_icon')
self.hour_1_temp = builder.get_object('hour_1_temp')
self.hour_2_clock = builder.get_object('hour_2_clock')
self.hour_2_chance_of_rain = builder.get_object('hour_2_chance_of_rain')
self.hour_2_icon = builder.get_object('hour_2_icon')
self.hour_2_temp = builder.get_object('hour_2_temp')
self.hour_3_clock = builder.get_object('hour_3_clock')
self.hour_3_chance_of_rain = builder.get_object('hour_3_chance_of_rain')
self.hour_3_icon = builder.get_object('hour_3_icon')
self.hour_3_temp = builder.get_object('hour_3_temp')
self.hour_4_clock = builder.get_object('hour_4_clock')
self.hour_4_chance_of_rain = builder.get_object('hour_4_chance_of_rain')
self.hour_4_icon = builder.get_object('hour_4_icon')
self.hour_4_temp = builder.get_object('hour_4_temp')
self.hour_5_clock = builder.get_object('hour_5_clock')
self.hour_5_chance_of_rain = builder.get_object('hour_5_chance_of_rain')
self.hour_5_icon = builder.get_object('hour_5_icon')
self.hour_5_temp = builder.get_object('hour_5_temp')
self.day_1_name = builder.get_object('day_1_name')
self.day_1_icon = builder.get_object('day_1_icon')
self.day_1_temp_max = builder.get_object('day_1_temp_max')
self.day_1_temp_min = builder.get_object('day_1_temp_min')
self.day_2_name = builder.get_object('day_2_name')
self.day_2_icon = builder.get_object('day_2_icon')
self.day_2_temp_max = builder.get_object('day_2_temp_max')
self.day_2_temp_min = builder.get_object('day_2_temp_min')
def onDestroy(self, *args):
Gtk.main_quit()
def on_button_search_clicked(self, widget):
import re, unicodedata
word = unicodedata.normalize('NFD', self.entry.get_text())
word = re.sub('[\u0300-\u036f]', '', word)
try:
now = datetime.now()
current_hour = int(now.strftime('%H'))
current_search = self.weather_instance.get_weather_info(word, current_hour=current_hour)
self.city_name.set_text(current_search['location']['name'] + '/' + current_search['location']['region'])
self.city_text.set_text(current_search['current']['condition']['text'])
self.main_temp.set_text(str(int(current_search['current']['temp_c'])) + '°')
weekday = now.strftime('%A')
self.weekday_name.set_text(weekday)
self.weekday_name_today.set_text('Today')
today_max_temp = str(int(current_search['forecast']['forecastday'][0]['day']['maxtemp_c']))
today_min_temp = str(int(current_search['forecast']['forecastday'][0]['day']['mintemp_c']))
self.temp_today_max.set_text(today_max_temp)
self.temp_today_min.set_text(today_min_temp)
if int(chance_of_rain := current_search['forecast']['forecastday'][0]['hour'][current_hour+2]['chance_of_rain'])>0:
self.hour_3_chance_of_rain.set_text(str(chance_of_rain) + '%')
self.hour_3_temp.set_text(str(int(current_search['forecast']['forecastday'][0]['hour'][current_hour+2]['temp_c'])))
else:
self.hour_3_clock.set_text('unavailable')
self.hour_3_temp.set_text('tomorrow')
self.hour_3_icon.set_from_file('./images/hour_icon/3.png')
if is_available(3):
self.hour_4_clock.set_text(str(int(now.strftime('%I'))+3) + now.strftime('%p'))
if int(chance_of_rain := current_search['forecast']['forecastday'][0]['hour'][current_hour+3]['chance_of_rain'])>0:
self.hour_4_chance_of_rain.set_text(str(chance_of_rain) + '%')
self.hour_4_temp.set_text(str(int(current_search['forecast']['forecastday'][0]['hour'][current_hour+3]['temp_c'])))
else:
self.hour_4_clock.set_text('unavailable')
self.hour_4_temp.set_text('tomorrow')
self.hour_4_icon.set_from_file('./images/hour_icon/4.png')
if is_available(4):
self.hour_5_clock.set_text(str(int(now.strftime('%I'))+4) + now.strftime('%p'))
if int(chance_of_rain := current_search['forecast']['forecastday'][0]['hour'][current_hour+4]['chance_of_rain'])>0:
self.hour_5_chance_of_rain.set_text(str(chance_of_rain) + '%')
self.hour_5_temp.set_text(str(int(current_search['forecast']['forecastday'][0]['hour'][current_hour+4]['temp_c'])))
else:
self.hour_5_clock.set_text('unavailable')
self.hour_5_temp.set_text('tomorrow')
self.hour_5_icon.set_from_file('./images/hour_icon/5.png')
| true | true |
f71f18bc90c86155e0835c84ecc4093b469ef8c1 | 338 | py | Python | ufba/modulos_e_excecoes/programaprincipal.py | rafaelsqueiroz/learning_phase | 6a04da40ba50e24a9ab79f940c8e4820ad34c07d | [
"MIT"
] | null | null | null | ufba/modulos_e_excecoes/programaprincipal.py | rafaelsqueiroz/learning_phase | 6a04da40ba50e24a9ab79f940c8e4820ad34c07d | [
"MIT"
] | 1 | 2019-10-31T19:51:27.000Z | 2019-10-31T19:51:27.000Z | ufba/modulos_e_excecoes/programaprincipal.py | rafaelsqueiroz/learning_phase | 6a04da40ba50e24a9ab79f940c8e4820ad34c07d | [
"MIT"
] | 1 | 2019-10-23T18:00:16.000Z | 2019-10-23T18:00:16.000Z | # -*- coding: utf-8 -*-
"""
Created on Wed May 12 16:33:11 2021
@author: Rafael Queiroz
"""
import ordenaarquivo
c1 = ordenaarquivo.OrdenaColunaStd('dados.txt', 2) # variável criada com base no OrdenaColunaStd
c2 = ordenaarquivo.OrdenaColunaMySort('dados.txt', 2) # variável criada com base no OrdenaColunaMySort
print(c1)
print(c2)
| 21.125 | 102 | 0.736686 |
import ordenaarquivo
c1 = ordenaarquivo.OrdenaColunaStd('dados.txt', 2)
c2 = ordenaarquivo.OrdenaColunaMySort('dados.txt', 2)
print(c1)
print(c2)
| true | true |
f71f18c04b849a12ab67707e2dc21b53b542b7e7 | 1,209 | py | Python | Library/usbscsi.py | P3nguin-M/edl | 967220426ad820e3d0ed471bbe7013ca0eb4a33c | [
"MIT"
] | 2 | 2020-08-26T09:23:40.000Z | 2020-10-08T20:32:05.000Z | Library/usbscsi.py | P3nguin-M/edl | 967220426ad820e3d0ed471bbe7013ca0eb4a33c | [
"MIT"
] | null | null | null | Library/usbscsi.py | P3nguin-M/edl | 967220426ad820e3d0ed471bbe7013ca0eb4a33c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
from Library.usblib import *
def main():
info='MassStorageBackdoor (c) B.Kerler 2019.'
parser = argparse.ArgumentParser(description=info)
print("\n"+info+"\n\n")
parser.add_argument('-vid',metavar="<vid>",help='[Option] Specify vid, default=0x2e04)', default="0x2e04")
parser.add_argument('-pid',metavar="<pid>", help='[Option] Specify pid, default=0xc025)', default="0xc025")
parser.add_argument('-interface', metavar="<pid>", help='[Option] Specify interface number)', default="")
parser.add_argument('-nokia', help='[Option] Enable Nokia adb backdoor', action='store_true')
args = parser.parse_args()
if args.vid!="":
vid=int(args.vid,16)
if args.pid!="":
pid=int(args.pid,16)
if args.interface!="":
interface=int(args.interface,16)
else:
interface=-1
usbscsi = scsi(vid, pid, interface)
if (usbscsi.connect()):
if args.nokia:
usbscsi.send_fih_adbenable()
usbscsi.send_fih_root()
else:
print("A command is required. Use -h to see options.")
exit(0)
usbscsi.close()
if __name__ == '__main__':
main()
| 33.583333 | 111 | 0.624483 |
import argparse
from Library.usblib import *
def main():
info='MassStorageBackdoor (c) B.Kerler 2019.'
parser = argparse.ArgumentParser(description=info)
print("\n"+info+"\n\n")
parser.add_argument('-vid',metavar="<vid>",help='[Option] Specify vid, default=0x2e04)', default="0x2e04")
parser.add_argument('-pid',metavar="<pid>", help='[Option] Specify pid, default=0xc025)', default="0xc025")
parser.add_argument('-interface', metavar="<pid>", help='[Option] Specify interface number)', default="")
parser.add_argument('-nokia', help='[Option] Enable Nokia adb backdoor', action='store_true')
args = parser.parse_args()
if args.vid!="":
vid=int(args.vid,16)
if args.pid!="":
pid=int(args.pid,16)
if args.interface!="":
interface=int(args.interface,16)
else:
interface=-1
usbscsi = scsi(vid, pid, interface)
if (usbscsi.connect()):
if args.nokia:
usbscsi.send_fih_adbenable()
usbscsi.send_fih_root()
else:
print("A command is required. Use -h to see options.")
exit(0)
usbscsi.close()
if __name__ == '__main__':
main()
| true | true |
f71f191d2d5dd4ef2234008a0e40b14db9e6a422 | 228 | py | Python | tests/samples/issue-274-support-one-package-without-package-dir/setup.py | mlasch/scikit-build | 664dd9c41cc54047d6d648b0466d525573da5a94 | [
"MIT"
] | 299 | 2015-10-19T22:45:08.000Z | 2022-03-30T21:15:55.000Z | tests/samples/issue-274-support-one-package-without-package-dir/setup.py | mlasch/scikit-build | 664dd9c41cc54047d6d648b0466d525573da5a94 | [
"MIT"
] | 588 | 2015-09-17T04:26:59.000Z | 2022-03-29T14:51:54.000Z | tests/samples/issue-274-support-one-package-without-package-dir/setup.py | mlasch/scikit-build | 664dd9c41cc54047d6d648b0466d525573da5a94 | [
"MIT"
] | 102 | 2015-10-19T22:45:13.000Z | 2022-03-20T21:09:08.000Z | from skbuild import setup
setup(
name="hello",
version="1.2.3",
description="a minimal example package",
author='The scikit-build team',
license="MIT",
packages=['hello'],
test_suite='hello_tests'
)
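# Illustrative note (assumption): as a scikit-build sample, this package is
# typically built with "pip install ." (or the project's own test harness),
# with scikit-build driving any CMake-based native build steps.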
| 19 | 44 | 0.644737 | from skbuild import setup
setup(
name="hello",
version="1.2.3",
description="a minimal example package",
author='The scikit-build team',
license="MIT",
packages=['hello'],
test_suite='hello_tests'
)
| true | true |
f71f1a05e28fe8ec2782d3b027061351db27751e | 10,516 | py | Python | tartiflette/execution/collect.py | remorses/tartiflette-whl | 92bed13de130a7a88278d7019314135e01281259 | [
"MIT"
] | null | null | null | tartiflette/execution/collect.py | remorses/tartiflette-whl | 92bed13de130a7a88278d7019314135e01281259 | [
"MIT"
] | null | null | null | tartiflette/execution/collect.py | remorses/tartiflette-whl | 92bed13de130a7a88278d7019314135e01281259 | [
"MIT"
] | null | null | null | from functools import lru_cache
from typing import Dict, List, Optional, Set, Tuple, Union
from tartiflette.execution.nodes.variable_definition import (
variable_definition_node_to_executable,
)
from tartiflette.language.ast import (
FieldNode,
FragmentSpreadNode,
InlineFragmentNode,
)
from tartiflette.language.parsers.libgraphqlparser import parse_to_document
from tartiflette.types.exceptions.tartiflette import (
SkipCollection,
TartifletteError,
)
from tartiflette.types.helpers.get_directive_instances import (
compute_directive_nodes,
)
from tartiflette.utils.directives import wraps_with_directives
from tartiflette.utils.errors import to_graphql_error
from tartiflette.utils.type_from_ast import schema_type_from_ast
__all__ = (
"parse_and_validate_query",
"collect_executable_variable_definitions",
"collect_fields",
"collect_subfields",
)
@lru_cache(maxsize=512)
def parse_and_validate_query(
query: Union[str, bytes], schema: "GraphQLSchema"
) -> Tuple[Optional["DocumentNode"], Optional[List["TartifletteError"]]]:
"""
Analyzes & validates a query by converting it to a DocumentNode.
:param query: the GraphQL request / query as UTF8-encoded string
:type query: Union[str, bytes]
:param schema: the GraphQLSchema instance linked to the engine
:type schema: GraphQLSchema
:return: a DocumentNode representing the query
:rtype: Tuple[Optional[DocumentNode], Optional[List[TartifletteError]]]
"""
try:
document: "DocumentNode" = parse_to_document(query, schema)
except TartifletteError as e:
return None, [e]
except Exception as e: # pylint: disable=broad-except
return (
None,
[to_graphql_error(e, message="Server encountered an error.")],
)
if document.validators.errors:
return None, document.validators.errors
return document, None
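# Illustrative usage (a sketch; "schema" is assumed to be the engine's
# GraphQLSchema instance):
#   document, errors = parse_and_validate_query("{ hello }", schema)
# On success "document" is the parsed DocumentNode and "errors" is None;
# on failure "document" is None and "errors" is a list of TartifletteError.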
@lru_cache(maxsize=512)
def collect_executable_variable_definitions(
schema: "GraphQLSchema",
document: "DocumentNode",
operation: "OperationDefinitionNode",
) -> List["ExecutableVariableDefinition"]:
"""
Go recursively through all variable definition AST nodes to convert them as
executable variable definition.
:param schema: the GraphQLSchema instance linked to the engine
:param document: the DocumentNode instance linked to the GraphQL request
:param operation: the AST operation definition node to execute
:type schema: GraphQLSchema
:type document: DocumentNode
:type operation: OperationDefinitionNode
:return: a list of executable variable definition
:rtype: List[ExecutableVariableDefinition]
"""
# pylint: disable=unused-argument
if not operation.variable_definitions:
return []
return [
variable_definition_node_to_executable(
schema, variable_definition_node
)
for variable_definition_node in operation.variable_definitions
]
async def should_include_node(
execution_context: "ExecutionContext",
node: Union["FragmentSpreadNode", "FieldNode", "InlineFragmentNode"],
) -> bool:
"""
Determines if a field should be included based on the @include and @skip
directives, where @skip has higher precedence than @include.
:param execution_context: instance of the query execution context
:param node: the selection node to collect or skip
:type execution_context: ExecutionContext
:type node: Union[FragmentSpreadNode, FieldNode, InlineFragmentNode]
:return: whether or not the node should be collected or skipped
:rtype: bool
"""
if not node.directives:
return True
hook_name = (
"on_field_collection"
if isinstance(node, FieldNode)
else (
"on_fragment_spread_collection"
if isinstance(node, FragmentSpreadNode)
else "on_inline_fragment_collection"
)
)
try:
await wraps_with_directives(
directives_definition=compute_directive_nodes(
execution_context.schema,
node.directives,
execution_context.variable_values,
),
directive_hook=hook_name,
with_default=True,
)(
node,
execution_context.context,
context_coercer=execution_context.context,
)
except SkipCollection:
return False
except Exception: # pylint: disable=broad-except
        # TODO: we should store unexpected exceptions in order to treat them as
        # field results on execution and handle them the same way as resolved
        # values (with error bubbling and so on).
return False
return True
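# Illustrative behaviour (assumption based on the SkipCollection handling
# above): for a selection such as 'field @skip(if: true)' the directive's
# on_field_collection hook is expected to raise SkipCollection, so this
# returns False and the field is dropped from collection; a plain selection
# with no directives is always collected.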
def get_field_entry_key(node: "FieldNode") -> str:
"""
Implements the logic to compute the key of a given field's entry.
:param node: field node from which to extract the entry key
:type node: FieldNode
:return: the field entry key
:rtype: str
"""
return node.alias.value if node.alias else node.name.value
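# Illustrative: for the selection 'nickname: name' the entry key is
# 'nickname' (the alias); for a plain 'name' selection it is 'name'.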
def does_fragment_condition_match(
execution_context: "ExecutionContext",
fragment_node: Union["FragmentDefinitionNode", "InlineFragmentNode"],
graphql_object_type: "GraphQLObjectType",
) -> bool:
"""
Determines if a fragment is applicable to the given type.
:param execution_context: instance of the query execution context
:param fragment_node: fragment node to check
:param graphql_object_type: GraphQLObjectType to check against with
:type execution_context: ExecutionContext
:type fragment_node: Union[FragmentDefinitionNode, InlineFragmentNode]
:type graphql_object_type: GraphQLObjectType
:return: whether or not the fragment is applicable to the given type
:rtype: bool
"""
type_condition_node = fragment_node.type_condition
if not type_condition_node:
return True
conditional_type = schema_type_from_ast(
execution_context.schema, type_condition_node
)
if conditional_type is graphql_object_type:
return True
return (
conditional_type.is_abstract_type
and conditional_type.is_possible_type(graphql_object_type)
)
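# Illustrative (hypothetical type names): a fragment '... on Animal { ... }'
# matches when the runtime type is Animal itself, or when Animal is an
# abstract type (interface/union) and the runtime type, e.g. Dog, is one of
# its possible types; a fragment with no type condition always matches.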
async def collect_fields(
execution_context: "ExecutionContext",
runtime_type: "GraphQLObjectType",
selection_set: "SelectionSetNode",
fields: Optional[Dict[str, List["FieldNode"]]] = None,
visited_fragment_names: Optional[Set[str]] = None,
) -> Dict[str, List["FieldNode"]]:
"""
Given a SelectionSet, adds all of the fields in that selection to
the passed in map of fields, and returns it at the end.
CollectFields requires the "runtime type" of an object. For a field which
returns an Interface or Union type, the "runtime type" will be the actual
Object type returned by that field.
:param execution_context: instance of the query execution context
:param runtime_type: current runtime type of the selection set
:param selection_set: selection set node to parse
:param fields: dictionary of collected fields
:param visited_fragment_names: the set of fragment names already visited
:type execution_context: ExecutionContext
:type runtime_type: GraphQLObjectType
:type selection_set: SelectionSetNode
:type fields: Optional[Dict[str, List[FieldNode]]]
:type visited_fragment_names: Optional[Set[str]]
:return: the dictionary of collected fields
:rtype: Dict[str, List[FieldNode]]
"""
# pylint: disable=too-complex
if fields is None:
fields: Dict[str, "FieldNode"] = {}
if visited_fragment_names is None:
visited_fragment_names: Set[str] = set()
for selection in selection_set.selections:
if isinstance(selection, FieldNode):
if not await should_include_node(execution_context, selection):
continue
fields.setdefault(get_field_entry_key(selection), []).append(
selection
)
elif isinstance(selection, InlineFragmentNode):
if not await should_include_node(
execution_context, selection
) or not does_fragment_condition_match(
execution_context, selection, runtime_type
):
continue
await collect_fields(
execution_context,
runtime_type,
selection.selection_set,
fields,
visited_fragment_names,
)
elif isinstance(selection, FragmentSpreadNode):
fragment_name = selection.name.value
if (
fragment_name in visited_fragment_names
or not await should_include_node(execution_context, selection)
):
continue
visited_fragment_names.add(fragment_name)
fragment_definition = execution_context.fragments[fragment_name]
if not fragment_definition or not does_fragment_condition_match(
execution_context, fragment_definition, runtime_type
):
continue
await collect_fields(
execution_context,
runtime_type,
fragment_definition.selection_set,
fields,
visited_fragment_names,
)
return fields
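# Illustrative result shape for a hypothetical query '{ a b: c }':
#   {"a": [<FieldNode a>], "b": [<FieldNode c>]}
# i.e. keys are entry keys (alias or field name) and values are the lists of
# FieldNodes collected for them, with matching fragments flattened in.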
async def collect_subfields(
execution_context: "ExecutionContext",
return_type: "GraphQLOutputType",
field_nodes: List["FieldNode"],
) -> Dict[str, List["FieldNode"]]:
"""
Collects the fields of each field nodes.
:param execution_context: instance of the query execution context
:param return_type: GraphQLOutputType of the parent field
:param field_nodes: AST nodes related to the parent field
:type execution_context: ExecutionContext
:type return_type: GraphQLOutputType
:type field_nodes: List[FieldNode]
:return: the dictionary of collected fields
:rtype: Dict[str, List[FieldNode]]
"""
subfield_nodes: Dict[str, List["FieldNode"]] = {}
visited_fragment_names: Set[str] = set()
for field_node in field_nodes:
selection_set = field_node.selection_set
if selection_set:
subfield_nodes = await collect_fields(
execution_context,
return_type,
selection_set,
subfield_nodes,
visited_fragment_names,
)
return subfield_nodes
| 35.527027 | 79 | 0.687143 | from functools import lru_cache
from typing import Dict, List, Optional, Set, Tuple, Union
from tartiflette.execution.nodes.variable_definition import (
variable_definition_node_to_executable,
)
from tartiflette.language.ast import (
FieldNode,
FragmentSpreadNode,
InlineFragmentNode,
)
from tartiflette.language.parsers.libgraphqlparser import parse_to_document
from tartiflette.types.exceptions.tartiflette import (
SkipCollection,
TartifletteError,
)
from tartiflette.types.helpers.get_directive_instances import (
compute_directive_nodes,
)
from tartiflette.utils.directives import wraps_with_directives
from tartiflette.utils.errors import to_graphql_error
from tartiflette.utils.type_from_ast import schema_type_from_ast
__all__ = (
"parse_and_validate_query",
"collect_executable_variable_definitions",
"collect_fields",
"collect_subfields",
)
@lru_cache(maxsize=512)
def parse_and_validate_query(
query: Union[str, bytes], schema: "GraphQLSchema"
) -> Tuple[Optional["DocumentNode"], Optional[List["TartifletteError"]]]:
try:
document: "DocumentNode" = parse_to_document(query, schema)
except TartifletteError as e:
return None, [e]
except Exception as e:
return (
None,
[to_graphql_error(e, message="Server encountered an error.")],
)
if document.validators.errors:
return None, document.validators.errors
return document, None
@lru_cache(maxsize=512)
def collect_executable_variable_definitions(
schema: "GraphQLSchema",
document: "DocumentNode",
operation: "OperationDefinitionNode",
) -> List["ExecutableVariableDefinition"]:
if not operation.variable_definitions:
return []
return [
variable_definition_node_to_executable(
schema, variable_definition_node
)
for variable_definition_node in operation.variable_definitions
]
async def should_include_node(
execution_context: "ExecutionContext",
node: Union["FragmentSpreadNode", "FieldNode", "InlineFragmentNode"],
) -> bool:
if not node.directives:
return True
hook_name = (
"on_field_collection"
if isinstance(node, FieldNode)
else (
"on_fragment_spread_collection"
if isinstance(node, FragmentSpreadNode)
else "on_inline_fragment_collection"
)
)
try:
await wraps_with_directives(
directives_definition=compute_directive_nodes(
execution_context.schema,
node.directives,
execution_context.variable_values,
),
directive_hook=hook_name,
with_default=True,
)(
node,
execution_context.context,
context_coercer=execution_context.context,
)
except SkipCollection:
return False
except Exception:
return False
return True
def get_field_entry_key(node: "FieldNode") -> str:
return node.alias.value if node.alias else node.name.value
def does_fragment_condition_match(
execution_context: "ExecutionContext",
fragment_node: Union["FragmentDefinitionNode", "InlineFragmentNode"],
graphql_object_type: "GraphQLObjectType",
) -> bool:
type_condition_node = fragment_node.type_condition
if not type_condition_node:
return True
conditional_type = schema_type_from_ast(
execution_context.schema, type_condition_node
)
if conditional_type is graphql_object_type:
return True
return (
conditional_type.is_abstract_type
and conditional_type.is_possible_type(graphql_object_type)
)
async def collect_fields(
execution_context: "ExecutionContext",
runtime_type: "GraphQLObjectType",
selection_set: "SelectionSetNode",
fields: Optional[Dict[str, List["FieldNode"]]] = None,
visited_fragment_names: Optional[Set[str]] = None,
) -> Dict[str, List["FieldNode"]]:
if fields is None:
fields: Dict[str, "FieldNode"] = {}
if visited_fragment_names is None:
visited_fragment_names: Set[str] = set()
for selection in selection_set.selections:
if isinstance(selection, FieldNode):
if not await should_include_node(execution_context, selection):
continue
fields.setdefault(get_field_entry_key(selection), []).append(
selection
)
elif isinstance(selection, InlineFragmentNode):
if not await should_include_node(
execution_context, selection
) or not does_fragment_condition_match(
execution_context, selection, runtime_type
):
continue
await collect_fields(
execution_context,
runtime_type,
selection.selection_set,
fields,
visited_fragment_names,
)
elif isinstance(selection, FragmentSpreadNode):
fragment_name = selection.name.value
if (
fragment_name in visited_fragment_names
or not await should_include_node(execution_context, selection)
):
continue
visited_fragment_names.add(fragment_name)
fragment_definition = execution_context.fragments[fragment_name]
if not fragment_definition or not does_fragment_condition_match(
execution_context, fragment_definition, runtime_type
):
continue
await collect_fields(
execution_context,
runtime_type,
fragment_definition.selection_set,
fields,
visited_fragment_names,
)
return fields
async def collect_subfields(
execution_context: "ExecutionContext",
return_type: "GraphQLOutputType",
field_nodes: List["FieldNode"],
) -> Dict[str, List["FieldNode"]]:
subfield_nodes: Dict[str, List["FieldNode"]] = {}
visited_fragment_names: Set[str] = set()
for field_node in field_nodes:
selection_set = field_node.selection_set
if selection_set:
subfield_nodes = await collect_fields(
execution_context,
return_type,
selection_set,
subfield_nodes,
visited_fragment_names,
)
return subfield_nodes
| true | true |
f71f1a8bc82d2d2ee616d7db40d3b03f67a2b9bb | 197 | py | Python | ietf/utils/models.py | wpjesus/codematch | eee7405259cce9239ea0545a2a1300ee1accfe94 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2015-09-02T19:53:12.000Z | 2015-09-02T19:53:12.000Z | ietf/utils/models.py | wpjesus/codematch | eee7405259cce9239ea0545a2a1300ee1accfe94 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | ietf/utils/models.py | wpjesus/codematch | eee7405259cce9239ea0545a2a1300ee1accfe94 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | # Copyright The IETF Trust 2015, All Rights Reserved
from django.db import models
class DumpInfo(models.Model):
date = models.DateTimeField()
host = models.CharField(max_length=128)
| 21.888889 | 52 | 0.736041 |
from django.db import models
class DumpInfo(models.Model):
date = models.DateTimeField()
host = models.CharField(max_length=128)
| true | true |
f71f1ba39c37b17eb6607ab6ec5ad71e37435d39 | 32,775 | py | Python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_07_01/aio/operations/_gallery_image_versions_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_07_01/aio/operations/_gallery_image_versions_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_07_01/aio/operations/_gallery_image_versions_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class GalleryImageVersionsOperations:
"""GalleryImageVersionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2021_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
gallery_image_version_name: str,
gallery_image_version: "_models.GalleryImageVersion",
**kwargs: Any
) -> "_models.GalleryImageVersion":
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryImageVersion"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryImageName': self._serialize.url("gallery_image_name", gallery_image_name, 'str'),
'galleryImageVersionName': self._serialize.url("gallery_image_version_name", gallery_image_version_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(gallery_image_version, 'GalleryImageVersion')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('GalleryImageVersion', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('GalleryImageVersion', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('GalleryImageVersion', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
gallery_image_version_name: str,
gallery_image_version: "_models.GalleryImageVersion",
**kwargs: Any
) -> AsyncLROPoller["_models.GalleryImageVersion"]:
"""Create or update a gallery image version.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Image Gallery in which the Image Definition
resides.
:type gallery_name: str
:param gallery_image_name: The name of the gallery image definition in which the Image Version
is to be created.
:type gallery_image_name: str
:param gallery_image_version_name: The name of the gallery image version to be created. Needs
to follow semantic version name pattern: The allowed characters are digit and period. Digits
must be within the range of a 32-bit integer. Format:
:code:`<MajorVersion>`.:code:`<MinorVersion>`.:code:`<Patch>`.
:type gallery_image_version_name: str
:param gallery_image_version: Parameters supplied to the create or update gallery image version
operation.
:type gallery_image_version: ~azure.mgmt.compute.v2021_07_01.models.GalleryImageVersion
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GalleryImageVersion or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_07_01.models.GalleryImageVersion]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryImageVersion"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
gallery_image_version_name=gallery_image_version_name,
gallery_image_version=gallery_image_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('GalleryImageVersion', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryImageName': self._serialize.url("gallery_image_name", gallery_image_name, 'str'),
'galleryImageVersionName': self._serialize.url("gallery_image_version_name", gallery_image_version_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}'} # type: ignore
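    # Illustrative usage (assumption: an authenticated async
    # ComputeManagementClient named "compute_client" and a populated
    # GalleryImageVersion model named "version_model"):
    #   poller = await compute_client.gallery_image_versions.begin_create_or_update(
    #       "my-rg", "myGallery", "myImageDef", "1.0.0", version_model)
    #   image_version = await poller.result()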
async def _update_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
gallery_image_version_name: str,
gallery_image_version: "_models.GalleryImageVersionUpdate",
**kwargs: Any
) -> "_models.GalleryImageVersion":
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryImageVersion"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryImageName': self._serialize.url("gallery_image_name", gallery_image_name, 'str'),
'galleryImageVersionName': self._serialize.url("gallery_image_version_name", gallery_image_version_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(gallery_image_version, 'GalleryImageVersionUpdate')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GalleryImageVersion', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
gallery_image_version_name: str,
gallery_image_version: "_models.GalleryImageVersionUpdate",
**kwargs: Any
) -> AsyncLROPoller["_models.GalleryImageVersion"]:
"""Update a gallery image version.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Image Gallery in which the Image Definition
resides.
:type gallery_name: str
:param gallery_image_name: The name of the gallery image definition in which the Image Version
is to be updated.
:type gallery_image_name: str
:param gallery_image_version_name: The name of the gallery image version to be updated. Needs
to follow semantic version name pattern: The allowed characters are digit and period. Digits
must be within the range of a 32-bit integer. Format:
:code:`<MajorVersion>`.:code:`<MinorVersion>`.:code:`<Patch>`.
:type gallery_image_version_name: str
:param gallery_image_version: Parameters supplied to the update gallery image version
operation.
:type gallery_image_version: ~azure.mgmt.compute.v2021_07_01.models.GalleryImageVersionUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GalleryImageVersion or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_07_01.models.GalleryImageVersion]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryImageVersion"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
gallery_image_version_name=gallery_image_version_name,
gallery_image_version=gallery_image_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('GalleryImageVersion', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryImageName': self._serialize.url("gallery_image_name", gallery_image_name, 'str'),
'galleryImageVersionName': self._serialize.url("gallery_image_version_name", gallery_image_version_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}'} # type: ignore
async def get(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
gallery_image_version_name: str,
expand: Optional[Union[str, "_models.ReplicationStatusTypes"]] = None,
**kwargs: Any
) -> "_models.GalleryImageVersion":
"""Retrieves information about a gallery image version.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Image Gallery in which the Image Definition
resides.
:type gallery_name: str
:param gallery_image_name: The name of the gallery image definition in which the Image Version
resides.
:type gallery_image_name: str
:param gallery_image_version_name: The name of the gallery image version to be retrieved.
:type gallery_image_version_name: str
:param expand: The expand expression to apply on the operation.
:type expand: str or ~azure.mgmt.compute.v2021_07_01.models.ReplicationStatusTypes
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GalleryImageVersion, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_07_01.models.GalleryImageVersion
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryImageVersion"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryImageName': self._serialize.url("gallery_image_name", gallery_image_name, 'str'),
'galleryImageVersionName': self._serialize.url("gallery_image_version_name", gallery_image_version_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GalleryImageVersion', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}'} # type: ignore
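    # Illustrative usage (same assumed client as above; "ReplicationStatus" is
    # the expand value modelled by ReplicationStatusTypes):
    #   version = await compute_client.gallery_image_versions.get(
    #       "my-rg", "myGallery", "myImageDef", "1.0.0", expand="ReplicationStatus")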
async def _delete_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
gallery_image_version_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-07-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryImageName': self._serialize.url("gallery_image_name", gallery_image_name, 'str'),
'galleryImageVersionName': self._serialize.url("gallery_image_version_name", gallery_image_version_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
gallery_image_version_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Delete a gallery image version.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Image Gallery in which the Image Definition
resides.
:type gallery_name: str
:param gallery_image_name: The name of the gallery image definition in which the Image Version
resides.
:type gallery_image_name: str
:param gallery_image_version_name: The name of the gallery image version to be deleted.
:type gallery_image_version_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
gallery_image_version_name=gallery_image_version_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryImageName': self._serialize.url("gallery_image_name", gallery_image_name, 'str'),
'galleryImageVersionName': self._serialize.url("gallery_image_version_name", gallery_image_version_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}'} # type: ignore
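    # Illustrative usage (same assumed client as above):
    #   poller = await compute_client.gallery_image_versions.begin_delete(
    #       "my-rg", "myGallery", "myImageDef", "1.0.0")
    #   await poller.result()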
def list_by_gallery_image(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
**kwargs: Any
) -> AsyncIterable["_models.GalleryImageVersionList"]:
"""List gallery image versions in a gallery image definition.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param gallery_name: The name of the Shared Image Gallery in which the Image Definition
resides.
:type gallery_name: str
:param gallery_image_name: The name of the Shared Image Gallery Image Definition from which the
Image Versions are to be listed.
:type gallery_image_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either GalleryImageVersionList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_07_01.models.GalleryImageVersionList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GalleryImageVersionList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_gallery_image.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryImageName': self._serialize.url("gallery_image_name", gallery_image_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('GalleryImageVersionList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_gallery_image.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions'} # type: ignore
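    # Illustrative usage (same assumed client as above; the call returns an
    # async pager that can be iterated with "async for"):
    #   async for version in compute_client.gallery_image_versions.list_by_gallery_image(
    #           "my-rg", "myGallery", "myImageDef"):
    #       print(version.name)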
| 53.119935 | 247 | 0.681983 |
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class GalleryImageVersionsOperations:
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
gallery_image_version_name: str,
gallery_image_version: "_models.GalleryImageVersion",
**kwargs: Any
) -> "_models.GalleryImageVersion":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
url = self._create_or_update_initial.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryImageName': self._serialize.url("gallery_image_name", gallery_image_name, 'str'),
'galleryImageVersionName': self._serialize.url("gallery_image_version_name", gallery_image_version_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {}
body_content = self._serialize.body(gallery_image_version, 'GalleryImageVersion')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('GalleryImageVersion', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('GalleryImageVersion', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('GalleryImageVersion', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}'}
async def begin_create_or_update(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
gallery_image_version_name: str,
gallery_image_version: "_models.GalleryImageVersion",
**kwargs: Any
) -> AsyncLROPoller["_models.GalleryImageVersion"]:
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
gallery_image_version_name=gallery_image_version_name,
gallery_image_version=gallery_image_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('GalleryImageVersion', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryImageName': self._serialize.url("gallery_image_name", gallery_image_name, 'str'),
'galleryImageVersionName': self._serialize.url("gallery_image_version_name", gallery_image_version_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}'}
async def _update_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
gallery_image_version_name: str,
gallery_image_version: "_models.GalleryImageVersionUpdate",
**kwargs: Any
) -> "_models.GalleryImageVersion":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
url = self._update_initial.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryImageName': self._serialize.url("gallery_image_name", gallery_image_name, 'str'),
'galleryImageVersionName': self._serialize.url("gallery_image_version_name", gallery_image_version_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {}
body_content = self._serialize.body(gallery_image_version, 'GalleryImageVersionUpdate')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GalleryImageVersion', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}'}
async def begin_update(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
gallery_image_version_name: str,
gallery_image_version: "_models.GalleryImageVersionUpdate",
**kwargs: Any
) -> AsyncLROPoller["_models.GalleryImageVersion"]:
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
gallery_image_version_name=gallery_image_version_name,
gallery_image_version=gallery_image_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('GalleryImageVersion', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryImageName': self._serialize.url("gallery_image_name", gallery_image_name, 'str'),
'galleryImageVersionName': self._serialize.url("gallery_image_version_name", gallery_image_version_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}'}
async def get(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
gallery_image_version_name: str,
expand: Optional[Union[str, "_models.ReplicationStatusTypes"]] = None,
**kwargs: Any
) -> "_models.GalleryImageVersion":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-07-01"
accept = "application/json"
url = self.get.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryImageName': self._serialize.url("gallery_image_name", gallery_image_name, 'str'),
'galleryImageVersionName': self._serialize.url("gallery_image_version_name", gallery_image_version_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GalleryImageVersion', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}'}
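    # Illustrative caller-side sketch (assumed `compute_client` handle): fetch a single
    # image version, optionally expanding replication status:
    #   version = await compute_client.gallery_image_versions.get(
    #       resource_group_name, gallery_name, gallery_image_name,
    #       gallery_image_version_name, expand="ReplicationStatus")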
async def _delete_initial(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
gallery_image_version_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-07-01"
accept = "application/json"
url = self._delete_initial.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryImageName': self._serialize.url("gallery_image_name", gallery_image_name, 'str'),
'galleryImageVersionName': self._serialize.url("gallery_image_version_name", gallery_image_version_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}'}
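    # _delete_initial issues the raw DELETE (200/202/204 accepted); begin_delete below
    # wraps it in an AsyncLROPoller so callers can await completion or resume polling
    # from a saved continuation token.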
async def begin_delete(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
gallery_image_version_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
gallery_name=gallery_name,
gallery_image_name=gallery_image_name,
gallery_image_version_name=gallery_image_version_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryImageName': self._serialize.url("gallery_image_name", gallery_image_name, 'str'),
'galleryImageVersionName': self._serialize.url("gallery_image_version_name", gallery_image_version_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions/{galleryImageVersionName}'}
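    # Illustrative caller-side sketch (assumed `compute_client` handle):
    #   poller = await compute_client.gallery_image_versions.begin_delete(
    #       resource_group_name, gallery_name, gallery_image_name,
    #       gallery_image_version_name)
    #   token = poller.continuation_token()  # may be stored and passed back later
    #   await poller.result()                # resolves once the version is removed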
def list_by_gallery_image(
self,
resource_group_name: str,
gallery_name: str,
gallery_image_name: str,
**kwargs: Any
) -> AsyncIterable["_models.GalleryImageVersionList"]:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-07-01"
accept = "application/json"
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
url = self.list_by_gallery_image.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'galleryName': self._serialize.url("gallery_name", gallery_name, 'str'),
'galleryImageName': self._serialize.url("gallery_image_name", gallery_image_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('GalleryImageVersionList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_gallery_image.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/images/{galleryImageName}/versions'}
| true | true |
f71f1d3378d6065dda7b43ab34672dde211e3e2f | 6,011 | py | Python | tests/scripts/thread-cert/Cert_9_2_18_RollBackActiveTimestamp.py | ctan-g/openthread | 376f35a49e5c0a5b8170c117d7a930e3a8b3b210 | [
"BSD-3-Clause"
] | 1 | 2020-08-12T06:15:53.000Z | 2020-08-12T06:15:53.000Z | tests/scripts/thread-cert/Cert_9_2_18_RollBackActiveTimestamp.py | ctan-g/openthread | 376f35a49e5c0a5b8170c117d7a930e3a8b3b210 | [
"BSD-3-Clause"
] | null | null | null | tests/scripts/thread-cert/Cert_9_2_18_RollBackActiveTimestamp.py | ctan-g/openthread | 376f35a49e5c0a5b8170c117d7a930e3a8b3b210 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import config
import thread_cert
KEY1 = '00112233445566778899aabbccddeeff'
KEY2 = 'ffeeddccbbaa99887766554433221100'
CHANNEL_INIT = 19
PANID_INIT = 0xface
COMMISSIONER = 1
LEADER = 2
ROUTER1 = 3
ROUTER2 = 4
ED1 = 5
SED1 = 6
MTDS = [ED1, SED1]
class Cert_9_2_18_RollBackActiveTimestamp(thread_cert.TestCase):
SUPPORT_NCP = False
TOPOLOGY = {
COMMISSIONER: {
'active_dataset': {
'timestamp': 1,
'panid': PANID_INIT,
'channel': CHANNEL_INIT,
'master_key': '00112233445566778899aabbccddeeff'
},
'mode': 'rsdn',
'router_selection_jitter': 1,
'whitelist': [LEADER]
},
LEADER: {
'active_dataset': {
'timestamp': 1,
'panid': PANID_INIT,
'channel': CHANNEL_INIT,
'master_key': '00112233445566778899aabbccddeeff'
},
'mode': 'rsdn',
'partition_id': 0xffffffff,
'router_selection_jitter': 1,
'whitelist': [COMMISSIONER, ROUTER1]
},
ROUTER1: {
'active_dataset': {
'timestamp': 1,
'panid': PANID_INIT,
'channel': CHANNEL_INIT,
'master_key': '00112233445566778899aabbccddeeff'
},
'mode': 'rsdn',
'router_selection_jitter': 1,
'whitelist': [LEADER, ROUTER2, ED1, SED1]
},
ROUTER2: {
'active_dataset': {
'timestamp': 1,
'panid': PANID_INIT,
'channel': CHANNEL_INIT,
'master_key': '00112233445566778899aabbccddeeff'
},
'mode': 'rsdn',
'router_selection_jitter': 1,
'whitelist': [ROUTER1]
},
ED1: {
'channel': CHANNEL_INIT,
'is_mtd': True,
'masterkey': '00112233445566778899aabbccddeeff',
'mode': 'rsn',
'panid': PANID_INIT,
'whitelist': [ROUTER1]
},
SED1: {
'channel': CHANNEL_INIT,
'is_mtd': True,
'masterkey': '00112233445566778899aabbccddeeff',
'mode': 's',
'panid': PANID_INIT,
'timeout': config.DEFAULT_CHILD_TIMEOUT,
'whitelist': [ROUTER1]
},
}
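    # All devices start from the same active dataset (timestamp 1, master key KEY1).
    # ROUTER2 is whitelisted only with ROUTER1 and is not started until the end of the
    # test, so it never receives the pending dataset that rotates the key to KEY2.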
def test(self):
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[COMMISSIONER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[COMMISSIONER].get_state(), 'router')
self.nodes[COMMISSIONER].commissioner_start()
self.simulator.go(3)
self.nodes[ROUTER1].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
self.nodes[ED1].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ED1].get_state(), 'child')
self.nodes[SED1].start()
self.simulator.go(5)
self.assertEqual(self.nodes[SED1].get_state(), 'child')
self.nodes[COMMISSIONER].send_mgmt_active_set(active_timestamp=20000, network_name='GRL')
self.simulator.go(5)
self.nodes[COMMISSIONER].send_mgmt_pending_set(
pending_timestamp=20,
active_timestamp=20,
delay_timer=20000,
network_name='Shouldnotbe',
)
self.simulator.go(5)
self.nodes[COMMISSIONER].send_mgmt_pending_set(
pending_timestamp=20,
active_timestamp=20,
delay_timer=20000,
network_name='MyHouse',
master_key=KEY2,
)
self.simulator.go(310)
self.assertEqual(self.nodes[COMMISSIONER].get_masterkey(), KEY2)
self.assertEqual(self.nodes[LEADER].get_masterkey(), KEY2)
self.assertEqual(self.nodes[ROUTER1].get_masterkey(), KEY2)
self.assertEqual(self.nodes[ED1].get_masterkey(), KEY2)
self.assertEqual(self.nodes[SED1].get_masterkey(), KEY2)
self.assertEqual(self.nodes[ROUTER2].get_masterkey(), KEY1)
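        # ROUTER2 was offline during the rotation and still holds KEY1, so once started
        # it cannot attach to the KEY2 network and becomes leader of its own partition.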
self.nodes[ROUTER2].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER2].get_state(), 'leader')
if __name__ == '__main__':
unittest.main()
| 34.153409 | 97 | 0.613875 |
import unittest
import config
import thread_cert
KEY1 = '00112233445566778899aabbccddeeff'
KEY2 = 'ffeeddccbbaa99887766554433221100'
CHANNEL_INIT = 19
PANID_INIT = 0xface
COMMISSIONER = 1
LEADER = 2
ROUTER1 = 3
ROUTER2 = 4
ED1 = 5
SED1 = 6
MTDS = [ED1, SED1]
class Cert_9_2_18_RollBackActiveTimestamp(thread_cert.TestCase):
SUPPORT_NCP = False
TOPOLOGY = {
COMMISSIONER: {
'active_dataset': {
'timestamp': 1,
'panid': PANID_INIT,
'channel': CHANNEL_INIT,
'master_key': '00112233445566778899aabbccddeeff'
},
'mode': 'rsdn',
'router_selection_jitter': 1,
'whitelist': [LEADER]
},
LEADER: {
'active_dataset': {
'timestamp': 1,
'panid': PANID_INIT,
'channel': CHANNEL_INIT,
'master_key': '00112233445566778899aabbccddeeff'
},
'mode': 'rsdn',
'partition_id': 0xffffffff,
'router_selection_jitter': 1,
'whitelist': [COMMISSIONER, ROUTER1]
},
ROUTER1: {
'active_dataset': {
'timestamp': 1,
'panid': PANID_INIT,
'channel': CHANNEL_INIT,
'master_key': '00112233445566778899aabbccddeeff'
},
'mode': 'rsdn',
'router_selection_jitter': 1,
'whitelist': [LEADER, ROUTER2, ED1, SED1]
},
ROUTER2: {
'active_dataset': {
'timestamp': 1,
'panid': PANID_INIT,
'channel': CHANNEL_INIT,
'master_key': '00112233445566778899aabbccddeeff'
},
'mode': 'rsdn',
'router_selection_jitter': 1,
'whitelist': [ROUTER1]
},
ED1: {
'channel': CHANNEL_INIT,
'is_mtd': True,
'masterkey': '00112233445566778899aabbccddeeff',
'mode': 'rsn',
'panid': PANID_INIT,
'whitelist': [ROUTER1]
},
SED1: {
'channel': CHANNEL_INIT,
'is_mtd': True,
'masterkey': '00112233445566778899aabbccddeeff',
'mode': 's',
'panid': PANID_INIT,
'timeout': config.DEFAULT_CHILD_TIMEOUT,
'whitelist': [ROUTER1]
},
}
def test(self):
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[COMMISSIONER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[COMMISSIONER].get_state(), 'router')
self.nodes[COMMISSIONER].commissioner_start()
self.simulator.go(3)
self.nodes[ROUTER1].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER1].get_state(), 'router')
self.nodes[ED1].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ED1].get_state(), 'child')
self.nodes[SED1].start()
self.simulator.go(5)
self.assertEqual(self.nodes[SED1].get_state(), 'child')
self.nodes[COMMISSIONER].send_mgmt_active_set(active_timestamp=20000, network_name='GRL')
self.simulator.go(5)
self.nodes[COMMISSIONER].send_mgmt_pending_set(
pending_timestamp=20,
active_timestamp=20,
delay_timer=20000,
network_name='Shouldnotbe',
)
self.simulator.go(5)
self.nodes[COMMISSIONER].send_mgmt_pending_set(
pending_timestamp=20,
active_timestamp=20,
delay_timer=20000,
network_name='MyHouse',
master_key=KEY2,
)
self.simulator.go(310)
self.assertEqual(self.nodes[COMMISSIONER].get_masterkey(), KEY2)
self.assertEqual(self.nodes[LEADER].get_masterkey(), KEY2)
self.assertEqual(self.nodes[ROUTER1].get_masterkey(), KEY2)
self.assertEqual(self.nodes[ED1].get_masterkey(), KEY2)
self.assertEqual(self.nodes[SED1].get_masterkey(), KEY2)
self.assertEqual(self.nodes[ROUTER2].get_masterkey(), KEY1)
self.nodes[ROUTER2].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER2].get_state(), 'leader')
if __name__ == '__main__':
unittest.main()
| true | true |
f71f1d706938ce52c44b987b1c1a221da711ca97 | 5,984 | py | Python | evalml/tests/integration_tests/test_data_checks_and_actions_integration.py | ColinRTaylor/evalml | ef4374494b50e22757f44edb753e54efbf71f430 | [
"BSD-3-Clause"
] | null | null | null | evalml/tests/integration_tests/test_data_checks_and_actions_integration.py | ColinRTaylor/evalml | ef4374494b50e22757f44edb753e54efbf71f430 | [
"BSD-3-Clause"
] | 1 | 2022-02-19T12:59:09.000Z | 2022-02-19T12:59:09.000Z | evalml/tests/integration_tests/test_data_checks_and_actions_integration.py | isabella232/evalml | 5b372d0dfac05ff9b7e41eb494a9df1bf2da4a9d | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import assert_frame_equal, assert_series_equal
from evalml.automl import get_default_primary_search_objective
from evalml.data_checks import DefaultDataChecks, OutliersDataCheck
from evalml.data_checks.invalid_target_data_check import InvalidTargetDataCheck
from evalml.data_checks.null_data_check import NullDataCheck
from evalml.pipelines import BinaryClassificationPipeline
from evalml.pipelines.components import (
DropColumns,
DropRowsTransformer,
TargetImputer,
)
from evalml.pipelines.components.transformers.imputers.per_column_imputer import (
PerColumnImputer,
)
from evalml.pipelines.multiclass_classification_pipeline import (
MulticlassClassificationPipeline,
)
from evalml.pipelines.regression_pipeline import RegressionPipeline
from evalml.pipelines.utils import make_pipeline_from_data_check_output
def test_data_checks_with_healthy_data(X_y_binary):
    # Healthy data: the default data checks report no actions, so the generated pipeline is empty.
X, y = X_y_binary
data_check = DefaultDataChecks(
"binary", get_default_primary_search_objective("binary")
)
data_checks_output = data_check.validate(X, y)
assert make_pipeline_from_data_check_output(
"binary", data_checks_output
) == BinaryClassificationPipeline(component_graph={}, parameters={}, random_seed=0)
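# NullDataCheck actions: partially-null columns get per-column imputers and fully-null
# columns get dropped; the test below builds and runs the pipeline made from them.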
def test_data_checks_suggests_drop_and_impute_cols():
X = pd.DataFrame(
{
"null_with_categorical": ["a", None, "b", "c", "c"],
"lots_of_null": [None, 7, None, 3, 5],
"all_null": [None, None, None, None, None],
"no_null": [1, 2, 3, 4, 5],
}
)
X.ww.init(logical_types={"null_with_categorical": "categorical"})
y = pd.Series([1, 0, 0, 1, 1])
data_check = NullDataCheck()
data_checks_output = data_check.validate(X, y)
action_pipeline = make_pipeline_from_data_check_output("binary", data_checks_output)
assert action_pipeline == BinaryClassificationPipeline(
component_graph={
"Per Column Imputer": [PerColumnImputer, "X", "y"],
"Drop Columns Transformer": [
DropColumns,
"Per Column Imputer.x",
"y",
],
},
parameters={
"Per Column Imputer": {
"impute_strategies": {
"null_with_categorical": {"impute_strategy": "most_frequent"},
"lots_of_null": {"impute_strategy": "mean"},
},
"default_impute_strategy": "most_frequent",
},
"Drop Columns Transformer": {"columns": ["all_null"]},
},
random_seed=0,
)
X_expected = pd.DataFrame(
{
"null_with_categorical": ["a", "c", "b", "c", "c"],
"lots_of_null": [5, 7, 5, 3, 5],
"no_null": [1, 2, 3, 4, 5],
}
)
X_expected.ww.init(
logical_types={"lots_of_null": "double", "null_with_categorical": "categorical"}
)
action_pipeline.fit(X, y)
X_t = action_pipeline.transform(X, y)
assert_frame_equal(X_expected, X_t)
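# InvalidTargetDataCheck actions: missing target values are imputed (mean for
# regression, most-frequent for classification), exercised per problem type below.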
@pytest.mark.parametrize("problem_type", ["binary", "multiclass", "regression"])
def test_data_checks_impute_cols(problem_type):
X = pd.DataFrame()
if problem_type == "binary":
y = ww.init_series(pd.Series([0, 1, 1, None, None]))
objective = "Log Loss Binary"
expected_pipeline_class = BinaryClassificationPipeline
y_expected = ww.init_series(pd.Series([0, 1, 1, 1, 1]), logical_type="double")
elif problem_type == "multiclass":
y = ww.init_series(pd.Series([0, 1, 2, 2, None]))
objective = "Log Loss Multiclass"
expected_pipeline_class = MulticlassClassificationPipeline
y_expected = ww.init_series(pd.Series([0, 1, 2, 2, 2]), logical_type="double")
else:
y = ww.init_series(pd.Series([0, 0.1, 0.2, None, None]))
objective = "R2"
expected_pipeline_class = RegressionPipeline
y_expected = ww.init_series(
pd.Series([0, 0.1, 0.2, 0.1, 0.1]), logical_type="double"
)
data_check = InvalidTargetDataCheck(problem_type, objective)
data_checks_output = data_check.validate(None, y)
action_pipeline = make_pipeline_from_data_check_output(
problem_type, data_checks_output
)
expected_parameters = (
{"Target Imputer": {"impute_strategy": "mean", "fill_value": None}}
if problem_type == "regression"
else {
"Target Imputer": {"impute_strategy": "most_frequent", "fill_value": None}
}
)
assert action_pipeline == expected_pipeline_class(
component_graph={"Target Imputer": [TargetImputer, "X", "y"]},
parameters=expected_parameters,
random_seed=0,
)
action_pipeline.fit(X, y)
_, y_t = action_pipeline.transform(X, y)
assert_series_equal(y_expected, y_t)
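# OutliersDataCheck actions: rows flagged as outliers are removed with DropRowsTransformer.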
def test_data_checks_suggests_drop_rows():
a = np.arange(10) * 0.01
data = np.tile(a, (100, 10))
X = pd.DataFrame(data=data)
X.iloc[0, 3] = 1000
X.iloc[3, 25] = 1000
X.iloc[5, 55] = 10000
X.iloc[10, 72] = -1000
X.iloc[:, 90] = "string_values"
y = pd.Series(np.tile([0, 1], 50))
outliers_check = OutliersDataCheck()
data_checks_output = outliers_check.validate(X)
action_pipeline = make_pipeline_from_data_check_output("binary", data_checks_output)
assert action_pipeline == BinaryClassificationPipeline(
component_graph={"Drop Rows Transformer": [DropRowsTransformer, "X", "y"]},
parameters={"Drop Rows Transformer": {"indices_to_drop": [0, 3, 5, 10]}},
random_seed=0,
)
X_expected = X.drop([0, 3, 5, 10])
X_expected.ww.init()
y_expected = y.drop([0, 3, 5, 10])
action_pipeline.fit(X, y)
X_t, y_t = action_pipeline.transform(X, y)
assert_frame_equal(X_expected, X_t)
assert_series_equal(y_expected, y_t)
| 35.832335 | 88 | 0.653242 | import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import assert_frame_equal, assert_series_equal
from evalml.automl import get_default_primary_search_objective
from evalml.data_checks import DefaultDataChecks, OutliersDataCheck
from evalml.data_checks.invalid_target_data_check import InvalidTargetDataCheck
from evalml.data_checks.null_data_check import NullDataCheck
from evalml.pipelines import BinaryClassificationPipeline
from evalml.pipelines.components import (
DropColumns,
DropRowsTransformer,
TargetImputer,
)
from evalml.pipelines.components.transformers.imputers.per_column_imputer import (
PerColumnImputer,
)
from evalml.pipelines.multiclass_classification_pipeline import (
MulticlassClassificationPipeline,
)
from evalml.pipelines.regression_pipeline import RegressionPipeline
from evalml.pipelines.utils import make_pipeline_from_data_check_output
def test_data_checks_with_healthy_data(X_y_binary):
X, y = X_y_binary
data_check = DefaultDataChecks(
"binary", get_default_primary_search_objective("binary")
)
data_checks_output = data_check.validate(X, y)
assert make_pipeline_from_data_check_output(
"binary", data_checks_output
) == BinaryClassificationPipeline(component_graph={}, parameters={}, random_seed=0)
def test_data_checks_suggests_drop_and_impute_cols():
X = pd.DataFrame(
{
"null_with_categorical": ["a", None, "b", "c", "c"],
"lots_of_null": [None, 7, None, 3, 5],
"all_null": [None, None, None, None, None],
"no_null": [1, 2, 3, 4, 5],
}
)
X.ww.init(logical_types={"null_with_categorical": "categorical"})
y = pd.Series([1, 0, 0, 1, 1])
data_check = NullDataCheck()
data_checks_output = data_check.validate(X, y)
action_pipeline = make_pipeline_from_data_check_output("binary", data_checks_output)
assert action_pipeline == BinaryClassificationPipeline(
component_graph={
"Per Column Imputer": [PerColumnImputer, "X", "y"],
"Drop Columns Transformer": [
DropColumns,
"Per Column Imputer.x",
"y",
],
},
parameters={
"Per Column Imputer": {
"impute_strategies": {
"null_with_categorical": {"impute_strategy": "most_frequent"},
"lots_of_null": {"impute_strategy": "mean"},
},
"default_impute_strategy": "most_frequent",
},
"Drop Columns Transformer": {"columns": ["all_null"]},
},
random_seed=0,
)
X_expected = pd.DataFrame(
{
"null_with_categorical": ["a", "c", "b", "c", "c"],
"lots_of_null": [5, 7, 5, 3, 5],
"no_null": [1, 2, 3, 4, 5],
}
)
X_expected.ww.init(
logical_types={"lots_of_null": "double", "null_with_categorical": "categorical"}
)
action_pipeline.fit(X, y)
X_t = action_pipeline.transform(X, y)
assert_frame_equal(X_expected, X_t)
@pytest.mark.parametrize("problem_type", ["binary", "multiclass", "regression"])
def test_data_checks_impute_cols(problem_type):
X = pd.DataFrame()
if problem_type == "binary":
y = ww.init_series(pd.Series([0, 1, 1, None, None]))
objective = "Log Loss Binary"
expected_pipeline_class = BinaryClassificationPipeline
y_expected = ww.init_series(pd.Series([0, 1, 1, 1, 1]), logical_type="double")
elif problem_type == "multiclass":
y = ww.init_series(pd.Series([0, 1, 2, 2, None]))
objective = "Log Loss Multiclass"
expected_pipeline_class = MulticlassClassificationPipeline
y_expected = ww.init_series(pd.Series([0, 1, 2, 2, 2]), logical_type="double")
else:
y = ww.init_series(pd.Series([0, 0.1, 0.2, None, None]))
objective = "R2"
expected_pipeline_class = RegressionPipeline
y_expected = ww.init_series(
pd.Series([0, 0.1, 0.2, 0.1, 0.1]), logical_type="double"
)
data_check = InvalidTargetDataCheck(problem_type, objective)
data_checks_output = data_check.validate(None, y)
action_pipeline = make_pipeline_from_data_check_output(
problem_type, data_checks_output
)
expected_parameters = (
{"Target Imputer": {"impute_strategy": "mean", "fill_value": None}}
if problem_type == "regression"
else {
"Target Imputer": {"impute_strategy": "most_frequent", "fill_value": None}
}
)
assert action_pipeline == expected_pipeline_class(
component_graph={"Target Imputer": [TargetImputer, "X", "y"]},
parameters=expected_parameters,
random_seed=0,
)
action_pipeline.fit(X, y)
_, y_t = action_pipeline.transform(X, y)
assert_series_equal(y_expected, y_t)
def test_data_checks_suggests_drop_rows():
a = np.arange(10) * 0.01
data = np.tile(a, (100, 10))
X = pd.DataFrame(data=data)
X.iloc[0, 3] = 1000
X.iloc[3, 25] = 1000
X.iloc[5, 55] = 10000
X.iloc[10, 72] = -1000
X.iloc[:, 90] = "string_values"
y = pd.Series(np.tile([0, 1], 50))
outliers_check = OutliersDataCheck()
data_checks_output = outliers_check.validate(X)
action_pipeline = make_pipeline_from_data_check_output("binary", data_checks_output)
assert action_pipeline == BinaryClassificationPipeline(
component_graph={"Drop Rows Transformer": [DropRowsTransformer, "X", "y"]},
parameters={"Drop Rows Transformer": {"indices_to_drop": [0, 3, 5, 10]}},
random_seed=0,
)
X_expected = X.drop([0, 3, 5, 10])
X_expected.ww.init()
y_expected = y.drop([0, 3, 5, 10])
action_pipeline.fit(X, y)
X_t, y_t = action_pipeline.transform(X, y)
assert_frame_equal(X_expected, X_t)
assert_series_equal(y_expected, y_t)
| true | true |
f71f1df4f03692a302604099633708d526d10823 | 241 | py | Python | tests/test_legacy.py | kamo-naoyuki/pyopenjtalk | 5d111301298ac630d2eae8c0a9e4c1af06b02fa4 | [
"MIT"
] | null | null | null | tests/test_legacy.py | kamo-naoyuki/pyopenjtalk | 5d111301298ac630d2eae8c0a9e4c1af06b02fa4 | [
"MIT"
] | null | null | null | tests/test_legacy.py | kamo-naoyuki/pyopenjtalk | 5d111301298ac630d2eae8c0a9e4c1af06b02fa4 | [
"MIT"
] | null | null | null | from pyopenjtalk.legacy import openjtalk
from nose.plugins.attrib import attr
@attr("local_only")
def test_legacy():
prons, labels, params = openjtalk("こんにちは")
    for label in labels:
        print(label)
assert "".join(prons) == "コンニチワ"
| 21.909091 | 46 | 0.680498 | from pyopenjtalk.legacy import openjtalk
from nose.plugins.attrib import attr
@attr("local_only")
def test_legacy():
prons, labels, params = openjtalk("こんにちは")
    for label in labels:
        print(label)
assert "".join(prons) == "コンニチワ"
| true | true |
f71f1e1feee03baa2748e6681aea5387b62bc527 | 2,458 | py | Python | sacrerouge/common/testing/util.py | danieldeutsch/decomposed-rouge | 0d723be8e3359f0bdcc9c7940336800895e46dbb | [
"Apache-2.0"
] | 1 | 2022-03-30T13:39:10.000Z | 2022-03-30T13:39:10.000Z | sacrerouge/common/testing/util.py | danieldeutsch/decomposed-rouge | 0d723be8e3359f0bdcc9c7940336800895e46dbb | [
"Apache-2.0"
] | null | null | null | sacrerouge/common/testing/util.py | danieldeutsch/decomposed-rouge | 0d723be8e3359f0bdcc9c7940336800895e46dbb | [
"Apache-2.0"
] | 1 | 2021-12-05T14:55:10.000Z | 2021-12-05T14:55:10.000Z | import argparse
from collections import defaultdict
from typing import Dict, List
from sacrerouge import build_argument_parser
from sacrerouge.data import Metrics, MetricsDict
from sacrerouge.data.types import ReferenceType, SummaryType
from sacrerouge.io import JsonlReader
def load_summaries(file_path: str) -> List[SummaryType]:
fields = []
for data in JsonlReader(file_path).read():
fields.append(data['summary'])
return fields
def load_references(file_path: str) -> List[ReferenceType]:
fields = []
for data in JsonlReader(file_path).read():
if 'summary' in data:
fields.append([data['summary']['text']])
elif 'summaries' in data:
fields.append([summary['text'] for summary in data['summaries']])
elif 'reference' in data:
fields.append([data['reference']['text']])
elif 'references' in data:
fields.append([reference['text'] for reference in data['references']])
return fields
def load_metrics_dicts(file_path: str) -> Dict[str, Dict[str, MetricsDict]]:
metrics_dicts = defaultdict(dict)
with JsonlReader(file_path, Metrics) as f:
for instance in f:
metrics_dicts[instance.instance_id][instance.summarizer_id] = instance.metrics
return metrics_dicts
def command_exists(parser: argparse.ArgumentParser, command: List[str]) -> bool:
"""
Checks to see if a specific command exists in the `parser`. The `parser` should
be the root `ArgumentParser` for the command. The method will traverse through
the `parser` to see if the `command` exists. This method does not work for checking
arguments of a specific command.
"""
    # _subparsers is None when the parser has no subcommands
if parser._subparsers is None:
return False
for action in parser._subparsers._group_actions:
for choice, subparser in action.choices.items():
if choice == command[0]:
if len(command) == 1:
# The whole command has been matched
return True
else:
return command_exists(subparser, command[1:])
# We didn't find the first command, so it doesn't exist
return False
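# Illustrative usage (sketch; the sub-command names are made up):
#   parser = argparse.ArgumentParser()
#   subparsers = parser.add_subparsers()
#   subparsers.add_parser('evaluate')
#   command_exists(parser, ['evaluate'])       # -> True
#   command_exists(parser, ['evaluate', 'x'])  # -> False (no nested subcommand)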
def sacrerouge_command_exists(command: List[str]) -> bool:
"""Verifies if the command exists for the 'sacrerouge' command."""
parser = build_argument_parser()
return command_exists(parser, command) | 36.686567 | 90 | 0.673718 | import argparse
from collections import defaultdict
from typing import Dict, List
from sacrerouge import build_argument_parser
from sacrerouge.data import Metrics, MetricsDict
from sacrerouge.data.types import ReferenceType, SummaryType
from sacrerouge.io import JsonlReader
def load_summaries(file_path: str) -> List[SummaryType]:
fields = []
for data in JsonlReader(file_path).read():
fields.append(data['summary'])
return fields
def load_references(file_path: str) -> List[ReferenceType]:
fields = []
for data in JsonlReader(file_path).read():
if 'summary' in data:
fields.append([data['summary']['text']])
elif 'summaries' in data:
fields.append([summary['text'] for summary in data['summaries']])
elif 'reference' in data:
fields.append([data['reference']['text']])
elif 'references' in data:
fields.append([reference['text'] for reference in data['references']])
return fields
def load_metrics_dicts(file_path: str) -> Dict[str, Dict[str, MetricsDict]]:
metrics_dicts = defaultdict(dict)
with JsonlReader(file_path, Metrics) as f:
for instance in f:
metrics_dicts[instance.instance_id][instance.summarizer_id] = instance.metrics
return metrics_dicts
def command_exists(parser: argparse.ArgumentParser, command: List[str]) -> bool:
if parser._subparsers is None:
return False
for action in parser._subparsers._group_actions:
for choice, subparser in action.choices.items():
if choice == command[0]:
if len(command) == 1:
return True
else:
return command_exists(subparser, command[1:])
return False
def sacrerouge_command_exists(command: List[str]) -> bool:
parser = build_argument_parser()
return command_exists(parser, command) | true | true |
f71f1e32af4b984c299468f63a15c54e52c1d245 | 2,017 | py | Python | tombomation/blog/models.py | tcuthbert/tombomation.net | 64932b533f88744b189937a2a71f74600c0b3e18 | [
"MIT"
] | null | null | null | tombomation/blog/models.py | tcuthbert/tombomation.net | 64932b533f88744b189937a2a71f74600c0b3e18 | [
"MIT"
] | null | null | null | tombomation/blog/models.py | tcuthbert/tombomation.net | 64932b533f88744b189937a2a71f74600c0b3e18 | [
"MIT"
] | null | null | null | from django.db import models
from markdown import markdown
# Create your models here.
# Reference: http://www.yaconiello.com/blog/part-1-creating-blog-system-using-django-markdown/
class Category(models.Model):
"""Category Model"""
title = models.CharField(
verbose_name = (u'Title'),
help_text = (u' '),
max_length = 255
)
slug = models.SlugField(
verbose_name = (u'Slug'),
help_text = (u'Uri identifier.'),
max_length = 255,
unique = True
)
class Meta:
app_label = (u'blog')
verbose_name = (u"Category")
verbose_name_plural = (u"Categories")
ordering = ['title',]
def __unicode__(self):
return "%s" % (self.title, )
class Post(models.Model):
"""Post Model"""
title = models.CharField(
verbose_name = (u'Title'),
help_text = (u' '),
max_length = 255
)
slug = models.SlugField(
verbose_name = (u'Slug'),
help_text = (u'Uri identifier.'),
max_length = 255,
unique = True
)
content_markdown = models.TextField(
verbose_name = (u'Content (Markdown)'),
help_text = (u'')
)
content_markup = models.TextField(
verbose_name = (u'Content (Markup)'),
help_text = (u' ')
)
categories = models.ManyToManyField(
Category,
verbose_name = (u'Categories'),
help_text = (u' '),
null = True,
blank = True
)
date_publish = models.DateTimeField(
verbose_name = (u'Publish Date'),
help_text = (u' '),
auto_now=True
)
class Meta:
app_label = (u'blog')
verbose_name = (u'Post')
verbose_name_plural = (u'Posts')
ordering = ['-date_publish']
def save(self, *args, **kwargs):
self.content_markup = markdown(self.content_markdown, ['codehilite'])
super(Post, self).save(*args, **kwargs)
def __unicode__(self):
return "%s" % (self.title,)
| 26.194805 | 94 | 0.565692 | from django.db import models
from markdown import markdown
class Category(models.Model):
title = models.CharField(
verbose_name = (u'Title'),
help_text = (u' '),
max_length = 255
)
slug = models.SlugField(
verbose_name = (u'Slug'),
help_text = (u'Uri identifier.'),
max_length = 255,
unique = True
)
class Meta:
app_label = (u'blog')
verbose_name = (u"Category")
verbose_name_plural = (u"Categories")
ordering = ['title',]
def __unicode__(self):
return "%s" % (self.title, )
class Post(models.Model):
title = models.CharField(
verbose_name = (u'Title'),
help_text = (u' '),
max_length = 255
)
slug = models.SlugField(
verbose_name = (u'Slug'),
help_text = (u'Uri identifier.'),
max_length = 255,
unique = True
)
content_markdown = models.TextField(
verbose_name = (u'Content (Markdown)'),
help_text = (u'')
)
content_markup = models.TextField(
verbose_name = (u'Content (Markup)'),
help_text = (u' ')
)
categories = models.ManyToManyField(
Category,
verbose_name = (u'Categories'),
help_text = (u' '),
null = True,
blank = True
)
date_publish = models.DateTimeField(
verbose_name = (u'Publish Date'),
help_text = (u' '),
auto_now=True
)
class Meta:
app_label = (u'blog')
verbose_name = (u'Post')
verbose_name_plural = (u'Posts')
ordering = ['-date_publish']
def save(self, *args, **kwargs):
self.content_markup = markdown(self.content_markdown, ['codehilite'])
super(Post, self).save(*args, **kwargs)
def __unicode__(self):
return "%s" % (self.title,)
| true | true |
f71f1e7f381f891029ed59f8389550fccb044dd7 | 19,628 | py | Python | env/lib/python3.7/site-packages/numba/tests/test_unicode.py | GU-DataLab/fairness-and-missing-values | 36a900aa235d1d53bd57e11c89e3f73f9a585aca | [
"MIT"
] | null | null | null | env/lib/python3.7/site-packages/numba/tests/test_unicode.py | GU-DataLab/fairness-and-missing-values | 36a900aa235d1d53bd57e11c89e3f73f9a585aca | [
"MIT"
] | null | null | null | env/lib/python3.7/site-packages/numba/tests/test_unicode.py | GU-DataLab/fairness-and-missing-values | 36a900aa235d1d53bd57e11c89e3f73f9a585aca | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# This file tests Python 3.4 style unicode strings
# Tests should be skipped on Python < 3.4
from __future__ import print_function
import sys
from itertools import permutations
from numba import njit
import numba.unittest_support as unittest
from .support import (TestCase, no_pyobj_flags, MemoryLeakMixin)
from numba.errors import TypingError
_py34_or_later = sys.version_info[:2] >= (3, 4)
def literal_usecase():
return '大处着眼,小处着手。'
def passthrough_usecase(x):
return x
def eq_usecase(x, y):
return x == y
def len_usecase(x):
return len(x)
def getitem_usecase(x, i):
return x[i]
def concat_usecase(x, y):
return x + y
def inplace_concat_usecase(x, y):
x += y
return x
def in_usecase(x, y):
return x in y
def lt_usecase(x, y):
return x < y
def le_usecase(x, y):
return x <= y
def gt_usecase(x, y):
return x > y
def ge_usecase(x, y):
return x >= y
def find_usecase(x, y):
return x.find(y)
def startswith_usecase(x, y):
return x.startswith(y)
def endswith_usecase(x, y):
return x.endswith(y)
def split_usecase(x, y):
return x.split(y)
def split_with_maxsplit_usecase(x, y, maxsplit):
return x.split(y, maxsplit)
def split_with_maxsplit_kwarg_usecase(x, y, maxsplit):
return x.split(y, maxsplit=maxsplit)
def split_whitespace_usecase(x):
return x.split()
def join_usecase(x, y):
return x.join(y)
def join_empty_usecase(x):
    # hack to make an empty typed list: start from [''] so the element type is known, then pop
l = ['']
l.pop()
return x.join(l)
class BaseTest(MemoryLeakMixin, TestCase):
def setUp(self):
super(BaseTest, self).setUp()
UNICODE_EXAMPLES = [
'ascii',
'12345',
'1234567890',
'¡Y tú quién te crees?',
'🐍⚡',
'大处着眼,小处着手。',
]
UNICODE_ORDERING_EXAMPLES = [
'',
'a'
'aa',
'aaa',
'b',
'aab',
'ab',
'asc',
'ascih',
'ascii',
'ascij',
'大处着眼,小处着手',
'大处着眼,小处着手。',
'大处着眼,小处着手。🐍⚡',
]
@unittest.skipUnless(_py34_or_later,
'unicode support requires Python 3.4 or later')
class TestUnicode(BaseTest):
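    # Each test compiles the corresponding use-case with njit and checks the compiled
    # result against the pure-Python result for the example strings above.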
def test_literal(self, flags=no_pyobj_flags):
pyfunc = literal_usecase
self.run_nullary_func(pyfunc, flags=flags)
def test_passthrough(self, flags=no_pyobj_flags):
pyfunc = passthrough_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
self.assertEqual(pyfunc(s), cfunc(s))
def test_eq(self, flags=no_pyobj_flags):
pyfunc = eq_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in reversed(UNICODE_EXAMPLES):
self.assertEqual(pyfunc(a, b),
cfunc(a, b), '%s, %s' % (a, b))
def _check_ordering_op(self, usecase):
pyfunc = usecase
cfunc = njit(pyfunc)
# Check comparison to self
for a in UNICODE_ORDERING_EXAMPLES:
self.assertEqual(
pyfunc(a, a),
cfunc(a, a),
'%s: "%s", "%s"' % (usecase.__name__, a, a),
)
# Check comparison to adjacent
for a, b in permutations(UNICODE_ORDERING_EXAMPLES, r=2):
self.assertEqual(
pyfunc(a, b),
cfunc(a, b),
'%s: "%s", "%s"' % (usecase.__name__, a, b),
)
# and reversed
self.assertEqual(
pyfunc(b, a),
cfunc(b, a),
'%s: "%s", "%s"' % (usecase.__name__, b, a),
)
def test_lt(self, flags=no_pyobj_flags):
self._check_ordering_op(lt_usecase)
def test_le(self, flags=no_pyobj_flags):
self._check_ordering_op(le_usecase)
def test_gt(self, flags=no_pyobj_flags):
self._check_ordering_op(gt_usecase)
def test_ge(self, flags=no_pyobj_flags):
self._check_ordering_op(ge_usecase)
def test_len(self, flags=no_pyobj_flags):
pyfunc = len_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
self.assertEqual(pyfunc(s), cfunc(s))
def test_startswith(self, flags=no_pyobj_flags):
pyfunc = startswith_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in [x for x in ['', 'x', a[:-2], a[3:], a, a + a]]:
self.assertEqual(pyfunc(a, b),
cfunc(a, b),
'%s, %s' % (a, b))
def test_endswith(self, flags=no_pyobj_flags):
pyfunc = endswith_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in [x for x in ['', 'x', a[:-2], a[3:], a, a + a]]:
self.assertEqual(pyfunc(a, b),
cfunc(a, b),
'%s, %s' % (a, b))
def test_in(self, flags=no_pyobj_flags):
pyfunc = in_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
extras = ['', 'xx', a[::-1], a[:-2], a[3:], a, a + a]
for substr in [x for x in extras]:
self.assertEqual(pyfunc(substr, a),
cfunc(substr, a),
"'%s' in '%s'?" % (substr, a))
def test_find(self, flags=no_pyobj_flags):
pyfunc = find_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
extras = ['', 'xx', a[::-1], a[:-2], a[3:], a, a + a]
for substr in [x for x in extras]:
self.assertEqual(pyfunc(a, substr),
cfunc(a, substr),
"'%s'.find('%s')?" % (a, substr))
def test_getitem(self):
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for i in range(-len(s)):
self.assertEqual(pyfunc(s, i),
cfunc(s, i),
"'%s'[%d]?" % (s, i))
def test_getitem_error(self):
self.disable_leak_check()
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
with self.assertRaises(IndexError) as raises:
pyfunc(s, len(s))
self.assertIn('string index out of range', str(raises.exception))
with self.assertRaises(IndexError) as raises:
cfunc(s, len(s))
self.assertIn('string index out of range', str(raises.exception))
def test_slice2(self):
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for i in list(range(-len(s), len(s))):
for j in list(range(-len(s), len(s))):
sl = slice(i, j)
self.assertEqual(pyfunc(s, sl),
cfunc(s, sl),
"'%s'[%d:%d]?" % (s, i, j))
def test_slice2_error(self):
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for i in [-2, -1, len(s), len(s) + 1]:
for j in [-2, -1, len(s), len(s) + 1]:
sl = slice(i, j)
self.assertEqual(pyfunc(s, sl),
cfunc(s, sl),
"'%s'[%d:%d]?" % (s, i, j))
def test_slice3(self):
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for i in range(-len(s), len(s)):
for j in range(-len(s), len(s)):
for k in [-2, -1, 1, 2]:
sl = slice(i, j, k)
self.assertEqual(pyfunc(s, sl),
cfunc(s, sl),
"'%s'[%d:%d:%d]?" % (s, i, j, k))
def test_slice3_error(self):
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for i in [-2, -1, len(s), len(s) + 1]:
for j in [-2, -1, len(s), len(s) + 1]:
for k in [-2, -1, 1, 2]:
sl = slice(i, j, k)
self.assertEqual(pyfunc(s, sl),
cfunc(s, sl),
"'%s'[%d:%d:%d]?" % (s, i, j, k))
def test_concat(self, flags=no_pyobj_flags):
pyfunc = concat_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in UNICODE_EXAMPLES[::-1]:
self.assertEqual(pyfunc(a, b),
cfunc(a, b),
"'%s' + '%s'?" % (a, b))
def test_split_exception_empty_sep(self):
self.disable_leak_check()
pyfunc = split_usecase
cfunc = njit(pyfunc)
# Handle empty separator exception
for func in [pyfunc, cfunc]:
with self.assertRaises(ValueError) as raises:
func('a', '')
self.assertIn('empty separator', str(raises.exception))
def test_split_exception_noninteger_maxsplit(self):
pyfunc = split_with_maxsplit_usecase
cfunc = njit(pyfunc)
# Handle non-integer maxsplit exception
for sep in [' ', None]:
with self.assertRaises(TypingError) as raises:
cfunc('a', sep, 2.4)
self.assertIn('float64', str(raises.exception),
'non-integer maxsplit with sep = %s' % sep)
def test_split(self):
pyfunc = split_usecase
cfunc = njit(pyfunc)
CASES = [
(' a ', None),
('', '⚡'),
('abcabc', '⚡'),
('🐍⚡', '⚡'),
('🐍⚡🐍', '⚡'),
('abababa', 'a'),
('abababa', 'b'),
('abababa', 'c'),
('abababa', 'ab'),
('abababa', 'aba'),
]
for test_str, splitter in CASES:
self.assertEqual(pyfunc(test_str, splitter),
cfunc(test_str, splitter),
"'%s'.split('%s')?" % (test_str, splitter))
def test_split_with_maxsplit(self):
CASES = [
(' a ', None, 1),
('', '⚡', 1),
('abcabc', '⚡', 1),
('🐍⚡', '⚡', 1),
('🐍⚡🐍', '⚡', 1),
('abababa', 'a', 2),
('abababa', 'b', 1),
('abababa', 'c', 2),
('abababa', 'ab', 1),
('abababa', 'aba', 5),
]
for pyfunc, fmt_str in [(split_with_maxsplit_usecase, "'%s'.split('%s', %d)?"),
(split_with_maxsplit_kwarg_usecase, "'%s'.split('%s', maxsplit=%d)?")]:
cfunc = njit(pyfunc)
for test_str, splitter, maxsplit in CASES:
self.assertEqual(pyfunc(test_str, splitter, maxsplit),
cfunc(test_str, splitter, maxsplit),
fmt_str % (test_str, splitter, maxsplit))
def test_split_whitespace(self):
# explicit sep=None cases covered in test_split and test_split_with_maxsplit
pyfunc = split_whitespace_usecase
cfunc = njit(pyfunc)
#list copied from https://github.com/python/cpython/blob/master/Objects/unicodetype_db.h
all_whitespace = ''.join(map(chr, [
0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x001C, 0x001D, 0x001E, 0x001F, 0x0020,
0x0085, 0x00A0, 0x1680, 0x2000, 0x2001, 0x2002, 0x2003, 0x2004, 0x2005, 0x2006,
0x2007, 0x2008, 0x2009, 0x200A, 0x2028, 0x2029, 0x202F, 0x205F, 0x3000
]))
CASES = [
'',
'abcabc',
'🐍 ⚡',
'🐍 ⚡ 🐍',
'🐍 ⚡ 🐍 ',
' 🐍 ⚡ 🐍',
' 🐍' + all_whitespace + '⚡ 🐍 ',
]
for test_str in CASES:
self.assertEqual(pyfunc(test_str),
cfunc(test_str),
"'%s'.split()?" % (test_str,))
def test_join_empty(self):
# Can't pass empty list to nopython mode, so we have to make a
# separate test case
pyfunc = join_empty_usecase
cfunc = njit(pyfunc)
CASES = [
'',
'🐍🐍🐍',
]
for sep in CASES:
self.assertEqual(pyfunc(sep),
cfunc(sep),
"'%s'.join([])?" % (sep,))
def test_join_non_string_exception(self):
# Verify that join of list of integers raises typing exception
pyfunc = join_usecase
cfunc = njit(pyfunc)
# Handle empty separator exception
with self.assertRaises(TypingError) as raises:
cfunc('', [1,2,3])
# This error message is obscure, but indicates the error was trapped in typing of str.join()
# Feel free to change this as we update error messages.
exc_message = str(raises.exception)
self.assertIn("Invalid use of BoundFunction", exc_message)
self.assertIn("(reflected list(int", exc_message) # could be int32 or int64
def test_join(self):
pyfunc = join_usecase
cfunc = njit(pyfunc)
CASES = [
('', ['', '', '']),
('a', ['', '', '']),
('', ['a', 'bbbb', 'c']),
('🐍🐍🐍', ['⚡⚡'] * 5),
]
for sep, parts in CASES:
self.assertEqual(pyfunc(sep, parts),
cfunc(sep, parts),
"'%s'.join('%s')?" % (sep, parts))
def test_join_interleave_str(self):
# can pass a string as the parts iterable
pyfunc = join_usecase
cfunc = njit(pyfunc)
CASES = [
('abc', '123'),
('🐍🐍🐍', '⚡⚡'),
]
for sep, parts in CASES:
self.assertEqual(pyfunc(sep, parts),
cfunc(sep, parts),
"'%s'.join('%s')?" % (sep, parts))
def test_inplace_concat(self, flags=no_pyobj_flags):
pyfunc = inplace_concat_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in UNICODE_EXAMPLES[::-1]:
self.assertEqual(pyfunc(a, b),
cfunc(a, b),
"'%s' + '%s'?" % (a, b))
def test_pointless_slice(self, flags=no_pyobj_flags):
def pyfunc(a):
return a[:]
cfunc = njit(pyfunc)
args = ['a']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_walk_backwards(self, flags=no_pyobj_flags):
def pyfunc(a):
return a[::-1]
cfunc = njit(pyfunc)
args = ['a']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_stride_slice(self, flags=no_pyobj_flags):
def pyfunc(a):
return a[::2]
cfunc = njit(pyfunc)
args = ['a']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_basic_lt(self, flags=no_pyobj_flags):
def pyfunc(a, b):
return a < b
cfunc = njit(pyfunc)
args = ['ab', 'b']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_basic_gt(self, flags=no_pyobj_flags):
def pyfunc(a, b):
return a > b
cfunc = njit(pyfunc)
args = ['ab', 'b']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_comparison(self):
def pyfunc(option, x, y):
if option == '==':
return x == y
elif option == '!=':
return x != y
elif option == '<':
return x < y
elif option == '>':
return x > y
elif option == '<=':
return x <= y
elif option == '>=':
return x >= y
else:
return None
cfunc = njit(pyfunc)
for x, y in permutations(UNICODE_ORDERING_EXAMPLES, r=2):
for cmpop in ['==', '!=', '<', '>', '<=', '>=', '']:
args = [cmpop, x, y]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_concat(self):
def pyfunc(x):
abc = 'abc'
if len(x):
return abc + 'b123' + x + 'IO'
else:
return x + abc + '123' + x
cfunc = njit(pyfunc)
args = ['x']
self.assertEqual(pyfunc(*args), cfunc(*args))
args = ['']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_literal_comparison(self):
def pyfunc(option):
x = 'a123'
y = 'aa12'
if option == '==':
return x == y
elif option == '!=':
return x != y
elif option == '<':
return x < y
elif option == '>':
return x > y
elif option == '<=':
return x <= y
elif option == '>=':
return x >= y
else:
return None
cfunc = njit(pyfunc)
for cmpop in ['==', '!=', '<', '>', '<=', '>=', '']:
args = [cmpop]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_len(self):
def pyfunc():
return len('abc')
cfunc = njit(pyfunc)
self.assertEqual(pyfunc(), cfunc())
def test_literal_getitem(self):
def pyfunc(which):
return 'abc'[which]
cfunc = njit(pyfunc)
for a in [-1, 0, 1, slice(1, None), slice(None, -1)]:
args = [a]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_in(self):
def pyfunc(x):
return x in '9876zabiuh'
cfunc = njit(pyfunc)
for a in ['a', '9', '1', '', '8uha', '987']:
args = [a]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_xyzwith(self):
def pyfunc(x, y):
return 'abc'.startswith(x), 'cde'.endswith(y)
cfunc = njit(pyfunc)
for args in permutations('abcdefg', r=2):
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_find(self):
def pyfunc(x):
return 'abc'.find(x), x.find('a')
cfunc = njit(pyfunc)
for a in ['ab']:
args = [a]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
@unittest.skipUnless(_py34_or_later,
'unicode support requires Python 3.4 or later')
class TestUnicodeInTuple(BaseTest):
def test_const_unicode_in_tuple(self):
# Issue 3673
@njit
def f():
return ('aa',) < ('bb',)
self.assertEqual(f.py_func(), f())
@njit
def f():
return ('cc',) < ('bb',)
self.assertEqual(f.py_func(), f())
def test_const_unicode_in_hetero_tuple(self):
@njit
def f():
return ('aa', 1) < ('bb', 1)
self.assertEqual(f.py_func(), f())
@njit
def f():
return ('aa', 1) < ('aa', 2)
self.assertEqual(f.py_func(), f())
if __name__ == '__main__':
unittest.main()
| 29.339312 | 103 | 0.481659 |
from __future__ import print_function
import sys
from itertools import permutations
from numba import njit
import numba.unittest_support as unittest
from .support import (TestCase, no_pyobj_flags, MemoryLeakMixin)
from numba.errors import TypingError
_py34_or_later = sys.version_info[:2] >= (3, 4)
def literal_usecase():
return '大处着眼,小处着手。'
def passthrough_usecase(x):
return x
def eq_usecase(x, y):
return x == y
def len_usecase(x):
return len(x)
def getitem_usecase(x, i):
return x[i]
def concat_usecase(x, y):
return x + y
def inplace_concat_usecase(x, y):
x += y
return x
def in_usecase(x, y):
return x in y
def lt_usecase(x, y):
return x < y
def le_usecase(x, y):
return x <= y
def gt_usecase(x, y):
return x > y
def ge_usecase(x, y):
return x >= y
def find_usecase(x, y):
return x.find(y)
def startswith_usecase(x, y):
return x.startswith(y)
def endswith_usecase(x, y):
return x.endswith(y)
def split_usecase(x, y):
return x.split(y)
def split_with_maxsplit_usecase(x, y, maxsplit):
return x.split(y, maxsplit)
def split_with_maxsplit_kwarg_usecase(x, y, maxsplit):
return x.split(y, maxsplit=maxsplit)
def split_whitespace_usecase(x):
return x.split()
def join_usecase(x, y):
return x.join(y)
def join_empty_usecase(x):
l = ['']
l.pop()
return x.join(l)
class BaseTest(MemoryLeakMixin, TestCase):
def setUp(self):
super(BaseTest, self).setUp()
UNICODE_EXAMPLES = [
'ascii',
'12345',
'1234567890',
'¡Y tú quién te crees?',
'🐍⚡',
'大处着眼,小处着手。',
]
UNICODE_ORDERING_EXAMPLES = [
'',
'a'
'aa',
'aaa',
'b',
'aab',
'ab',
'asc',
'ascih',
'ascii',
'ascij',
'大处着眼,小处着手',
'大处着眼,小处着手。',
'大处着眼,小处着手。🐍⚡',
]
@unittest.skipUnless(_py34_or_later,
'unicode support requires Python 3.4 or later')
class TestUnicode(BaseTest):
def test_literal(self, flags=no_pyobj_flags):
pyfunc = literal_usecase
self.run_nullary_func(pyfunc, flags=flags)
def test_passthrough(self, flags=no_pyobj_flags):
pyfunc = passthrough_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
self.assertEqual(pyfunc(s), cfunc(s))
def test_eq(self, flags=no_pyobj_flags):
pyfunc = eq_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in reversed(UNICODE_EXAMPLES):
self.assertEqual(pyfunc(a, b),
cfunc(a, b), '%s, %s' % (a, b))
def _check_ordering_op(self, usecase):
pyfunc = usecase
cfunc = njit(pyfunc)
for a in UNICODE_ORDERING_EXAMPLES:
self.assertEqual(
pyfunc(a, a),
cfunc(a, a),
'%s: "%s", "%s"' % (usecase.__name__, a, a),
)
for a, b in permutations(UNICODE_ORDERING_EXAMPLES, r=2):
self.assertEqual(
pyfunc(a, b),
cfunc(a, b),
'%s: "%s", "%s"' % (usecase.__name__, a, b),
)
self.assertEqual(
pyfunc(b, a),
cfunc(b, a),
'%s: "%s", "%s"' % (usecase.__name__, b, a),
)
def test_lt(self, flags=no_pyobj_flags):
self._check_ordering_op(lt_usecase)
def test_le(self, flags=no_pyobj_flags):
self._check_ordering_op(le_usecase)
def test_gt(self, flags=no_pyobj_flags):
self._check_ordering_op(gt_usecase)
def test_ge(self, flags=no_pyobj_flags):
self._check_ordering_op(ge_usecase)
def test_len(self, flags=no_pyobj_flags):
pyfunc = len_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
self.assertEqual(pyfunc(s), cfunc(s))
def test_startswith(self, flags=no_pyobj_flags):
pyfunc = startswith_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in [x for x in ['', 'x', a[:-2], a[3:], a, a + a]]:
self.assertEqual(pyfunc(a, b),
cfunc(a, b),
'%s, %s' % (a, b))
def test_endswith(self, flags=no_pyobj_flags):
pyfunc = endswith_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in [x for x in ['', 'x', a[:-2], a[3:], a, a + a]]:
self.assertEqual(pyfunc(a, b),
cfunc(a, b),
'%s, %s' % (a, b))
def test_in(self, flags=no_pyobj_flags):
pyfunc = in_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
extras = ['', 'xx', a[::-1], a[:-2], a[3:], a, a + a]
for substr in [x for x in extras]:
self.assertEqual(pyfunc(substr, a),
cfunc(substr, a),
"'%s' in '%s'?" % (substr, a))
def test_find(self, flags=no_pyobj_flags):
pyfunc = find_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
extras = ['', 'xx', a[::-1], a[:-2], a[3:], a, a + a]
for substr in [x for x in extras]:
self.assertEqual(pyfunc(a, substr),
cfunc(a, substr),
"'%s'.find('%s')?" % (a, substr))
def test_getitem(self):
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for i in range(-len(s)):
self.assertEqual(pyfunc(s, i),
cfunc(s, i),
"'%s'[%d]?" % (s, i))
def test_getitem_error(self):
self.disable_leak_check()
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
with self.assertRaises(IndexError) as raises:
pyfunc(s, len(s))
self.assertIn('string index out of range', str(raises.exception))
with self.assertRaises(IndexError) as raises:
cfunc(s, len(s))
self.assertIn('string index out of range', str(raises.exception))
def test_slice2(self):
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for i in list(range(-len(s), len(s))):
for j in list(range(-len(s), len(s))):
sl = slice(i, j)
self.assertEqual(pyfunc(s, sl),
cfunc(s, sl),
"'%s'[%d:%d]?" % (s, i, j))
def test_slice2_error(self):
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for i in [-2, -1, len(s), len(s) + 1]:
for j in [-2, -1, len(s), len(s) + 1]:
sl = slice(i, j)
self.assertEqual(pyfunc(s, sl),
cfunc(s, sl),
"'%s'[%d:%d]?" % (s, i, j))
def test_slice3(self):
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for i in range(-len(s), len(s)):
for j in range(-len(s), len(s)):
for k in [-2, -1, 1, 2]:
sl = slice(i, j, k)
self.assertEqual(pyfunc(s, sl),
cfunc(s, sl),
"'%s'[%d:%d:%d]?" % (s, i, j, k))
def test_slice3_error(self):
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for i in [-2, -1, len(s), len(s) + 1]:
for j in [-2, -1, len(s), len(s) + 1]:
for k in [-2, -1, 1, 2]:
sl = slice(i, j, k)
self.assertEqual(pyfunc(s, sl),
cfunc(s, sl),
"'%s'[%d:%d:%d]?" % (s, i, j, k))
def test_concat(self, flags=no_pyobj_flags):
pyfunc = concat_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in UNICODE_EXAMPLES[::-1]:
self.assertEqual(pyfunc(a, b),
cfunc(a, b),
"'%s' + '%s'?" % (a, b))
def test_split_exception_empty_sep(self):
self.disable_leak_check()
pyfunc = split_usecase
cfunc = njit(pyfunc)
for func in [pyfunc, cfunc]:
with self.assertRaises(ValueError) as raises:
func('a', '')
self.assertIn('empty separator', str(raises.exception))
def test_split_exception_noninteger_maxsplit(self):
pyfunc = split_with_maxsplit_usecase
cfunc = njit(pyfunc)
for sep in [' ', None]:
with self.assertRaises(TypingError) as raises:
cfunc('a', sep, 2.4)
self.assertIn('float64', str(raises.exception),
'non-integer maxsplit with sep = %s' % sep)
def test_split(self):
pyfunc = split_usecase
cfunc = njit(pyfunc)
CASES = [
(' a ', None),
('', '⚡'),
('abcabc', '⚡'),
('🐍⚡', '⚡'),
('🐍⚡🐍', '⚡'),
('abababa', 'a'),
('abababa', 'b'),
('abababa', 'c'),
('abababa', 'ab'),
('abababa', 'aba'),
]
for test_str, splitter in CASES:
self.assertEqual(pyfunc(test_str, splitter),
cfunc(test_str, splitter),
"'%s'.split('%s')?" % (test_str, splitter))
def test_split_with_maxsplit(self):
CASES = [
(' a ', None, 1),
('', '⚡', 1),
('abcabc', '⚡', 1),
('🐍⚡', '⚡', 1),
('🐍⚡🐍', '⚡', 1),
('abababa', 'a', 2),
('abababa', 'b', 1),
('abababa', 'c', 2),
('abababa', 'ab', 1),
('abababa', 'aba', 5),
]
for pyfunc, fmt_str in [(split_with_maxsplit_usecase, "'%s'.split('%s', %d)?"),
(split_with_maxsplit_kwarg_usecase, "'%s'.split('%s', maxsplit=%d)?")]:
cfunc = njit(pyfunc)
for test_str, splitter, maxsplit in CASES:
self.assertEqual(pyfunc(test_str, splitter, maxsplit),
cfunc(test_str, splitter, maxsplit),
fmt_str % (test_str, splitter, maxsplit))
def test_split_whitespace(self):
pyfunc = split_whitespace_usecase
cfunc = njit(pyfunc)
all_whitespace = ''.join(map(chr, [
0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x001C, 0x001D, 0x001E, 0x001F, 0x0020,
0x0085, 0x00A0, 0x1680, 0x2000, 0x2001, 0x2002, 0x2003, 0x2004, 0x2005, 0x2006,
0x2007, 0x2008, 0x2009, 0x200A, 0x2028, 0x2029, 0x202F, 0x205F, 0x3000
]))
CASES = [
'',
'abcabc',
'🐍 ⚡',
'🐍 ⚡ 🐍',
'🐍 ⚡ 🐍 ',
' 🐍 ⚡ 🐍',
' 🐍' + all_whitespace + '⚡ 🐍 ',
]
for test_str in CASES:
self.assertEqual(pyfunc(test_str),
cfunc(test_str),
"'%s'.split()?" % (test_str,))
def test_join_empty(self):
# separate test case
pyfunc = join_empty_usecase
cfunc = njit(pyfunc)
CASES = [
'',
'🐍🐍🐍',
]
for sep in CASES:
self.assertEqual(pyfunc(sep),
cfunc(sep),
"'%s'.join([])?" % (sep,))
def test_join_non_string_exception(self):
# Verify that join of list of integers raises typing exception
pyfunc = join_usecase
cfunc = njit(pyfunc)
# Handle empty separator exception
with self.assertRaises(TypingError) as raises:
cfunc('', [1,2,3])
# This error message is obscure, but indicates the error was trapped in typing of str.join()
# Feel free to change this as we update error messages.
exc_message = str(raises.exception)
self.assertIn("Invalid use of BoundFunction", exc_message)
self.assertIn("(reflected list(int", exc_message) # could be int32 or int64
def test_join(self):
pyfunc = join_usecase
cfunc = njit(pyfunc)
CASES = [
('', ['', '', '']),
('a', ['', '', '']),
('', ['a', 'bbbb', 'c']),
('🐍🐍🐍', ['⚡⚡'] * 5),
]
for sep, parts in CASES:
self.assertEqual(pyfunc(sep, parts),
cfunc(sep, parts),
"'%s'.join('%s')?" % (sep, parts))
def test_join_interleave_str(self):
# can pass a string as the parts iterable
pyfunc = join_usecase
cfunc = njit(pyfunc)
CASES = [
('abc', '123'),
('🐍🐍🐍', '⚡⚡'),
]
for sep, parts in CASES:
self.assertEqual(pyfunc(sep, parts),
cfunc(sep, parts),
"'%s'.join('%s')?" % (sep, parts))
def test_inplace_concat(self, flags=no_pyobj_flags):
pyfunc = inplace_concat_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in UNICODE_EXAMPLES[::-1]:
self.assertEqual(pyfunc(a, b),
cfunc(a, b),
"'%s' + '%s'?" % (a, b))
def test_pointless_slice(self, flags=no_pyobj_flags):
def pyfunc(a):
return a[:]
cfunc = njit(pyfunc)
args = ['a']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_walk_backwards(self, flags=no_pyobj_flags):
def pyfunc(a):
return a[::-1]
cfunc = njit(pyfunc)
args = ['a']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_stride_slice(self, flags=no_pyobj_flags):
def pyfunc(a):
return a[::2]
cfunc = njit(pyfunc)
args = ['a']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_basic_lt(self, flags=no_pyobj_flags):
def pyfunc(a, b):
return a < b
cfunc = njit(pyfunc)
args = ['ab', 'b']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_basic_gt(self, flags=no_pyobj_flags):
def pyfunc(a, b):
return a > b
cfunc = njit(pyfunc)
args = ['ab', 'b']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_comparison(self):
def pyfunc(option, x, y):
if option == '==':
return x == y
elif option == '!=':
return x != y
elif option == '<':
return x < y
elif option == '>':
return x > y
elif option == '<=':
return x <= y
elif option == '>=':
return x >= y
else:
return None
cfunc = njit(pyfunc)
for x, y in permutations(UNICODE_ORDERING_EXAMPLES, r=2):
for cmpop in ['==', '!=', '<', '>', '<=', '>=', '']:
args = [cmpop, x, y]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_concat(self):
def pyfunc(x):
abc = 'abc'
if len(x):
return abc + 'b123' + x + 'IO'
else:
return x + abc + '123' + x
cfunc = njit(pyfunc)
args = ['x']
self.assertEqual(pyfunc(*args), cfunc(*args))
args = ['']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_literal_comparison(self):
def pyfunc(option):
x = 'a123'
y = 'aa12'
if option == '==':
return x == y
elif option == '!=':
return x != y
elif option == '<':
return x < y
elif option == '>':
return x > y
elif option == '<=':
return x <= y
elif option == '>=':
return x >= y
else:
return None
cfunc = njit(pyfunc)
for cmpop in ['==', '!=', '<', '>', '<=', '>=', '']:
args = [cmpop]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_len(self):
def pyfunc():
return len('abc')
cfunc = njit(pyfunc)
self.assertEqual(pyfunc(), cfunc())
def test_literal_getitem(self):
def pyfunc(which):
return 'abc'[which]
cfunc = njit(pyfunc)
for a in [-1, 0, 1, slice(1, None), slice(None, -1)]:
args = [a]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_in(self):
def pyfunc(x):
return x in '9876zabiuh'
cfunc = njit(pyfunc)
for a in ['a', '9', '1', '', '8uha', '987']:
args = [a]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_xyzwith(self):
def pyfunc(x, y):
return 'abc'.startswith(x), 'cde'.endswith(y)
cfunc = njit(pyfunc)
for args in permutations('abcdefg', r=2):
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_find(self):
def pyfunc(x):
return 'abc'.find(x), x.find('a')
cfunc = njit(pyfunc)
for a in ['ab']:
args = [a]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
@unittest.skipUnless(_py34_or_later,
'unicode support requires Python 3.4 or later')
class TestUnicodeInTuple(BaseTest):
def test_const_unicode_in_tuple(self):
# Issue 3673
@njit
def f():
return ('aa',) < ('bb',)
self.assertEqual(f.py_func(), f())
@njit
def f():
return ('cc',) < ('bb',)
self.assertEqual(f.py_func(), f())
def test_const_unicode_in_hetero_tuple(self):
@njit
def f():
return ('aa', 1) < ('bb', 1)
self.assertEqual(f.py_func(), f())
@njit
def f():
return ('aa', 1) < ('aa', 2)
self.assertEqual(f.py_func(), f())
if __name__ == '__main__':
unittest.main()
| true | true |
f71f1f105e2e4d5cd2867235033eebefdf2a3279 | 861 | py | Python | leetcode/py/17-letter-combinations-of-a-phone-number.py | tanchao/algo | 76de42b7b415f8251a50553027efad998d0b4137 | [
"MIT"
] | 2 | 2016-12-08T08:42:03.000Z | 2020-05-15T21:08:22.000Z | leetcode/py/17-letter-combinations-of-a-phone-number.py | tanchao/algo | 76de42b7b415f8251a50553027efad998d0b4137 | [
"MIT"
] | null | null | null | leetcode/py/17-letter-combinations-of-a-phone-number.py | tanchao/algo | 76de42b7b415f8251a50553027efad998d0b4137 | [
"MIT"
] | null | null | null | NUMBER_TO_LETTER = {
'2': ['a', 'b', 'c'],
'3': ['d', 'e', 'f'],
'4': ['g', 'h', 'i'],
'5': ['j', 'k', 'l'],
'6': ['m', 'n', 'o'],
'7': ['p', 'q', 'r', 's'],
'8': ['t', 'u', 'v'],
'9': ['w', 'x', 'y', 'z'],
}
class Solution:
def letterCombinations(self, digits: str):
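        # Iteratively build the Cartesian product of the mapped letters: for each
        # digit, every combination collected so far is extended with each letter.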
if len(digits) == 0:
return []
all_combinations = ['']
for digit in digits:
if digit not in NUMBER_TO_LETTER:
return [] # @todo: unexpected
curr_combinations = []
for letter in NUMBER_TO_LETTER[digit]:
for combination in all_combinations:
curr_combinations.append(combination + letter)
all_combinations = curr_combinations
return all_combinations
solution = Solution()
print(solution.letterCombinations("42")) | 28.7 | 66 | 0.487805 | NUMBER_TO_LETTER = {
'2': ['a', 'b', 'c'],
'3': ['d', 'e', 'f'],
'4': ['g', 'h', 'i'],
'5': ['j', 'k', 'l'],
'6': ['m', 'n', 'o'],
'7': ['p', 'q', 'r', 's'],
'8': ['t', 'u', 'v'],
'9': ['w', 'x', 'y', 'z'],
}
class Solution:
def letterCombinations(self, digits: str):
if len(digits) == 0:
return []
all_combinations = ['']
for digit in digits:
if digit not in NUMBER_TO_LETTER:
return []
curr_combinations = []
for letter in NUMBER_TO_LETTER[digit]:
for combination in all_combinations:
curr_combinations.append(combination + letter)
all_combinations = curr_combinations
return all_combinations
solution = Solution()
print(solution.letterCombinations("42")) | true | true |
f71f1f1e8feb9c87e7d3db3d77a033d89c3682cb | 1,213 | py | Python | setup.py | imanmousaei/coinexpy | be542652b493c588dbf4d630ec50ab92cf5e5371 | [
"MIT"
] | 12 | 2021-09-02T18:54:04.000Z | 2022-03-17T11:40:39.000Z | setup.py | imanmousaei/coinexpy | be542652b493c588dbf4d630ec50ab92cf5e5371 | [
"MIT"
] | 4 | 2021-09-13T11:14:03.000Z | 2021-12-11T09:45:33.000Z | setup.py | imanmousaei/coinexpy | be542652b493c588dbf4d630ec50ab92cf5e5371 | [
"MIT"
] | 5 | 2021-09-02T19:03:17.000Z | 2022-01-13T13:10:32.000Z | from setuptools import setup
version = '0.5.1'
setup(
name='coinexpy',
packages=['coinexpy'],
version=version,
license='MIT',
description='Python wrapper for Coinex APIs',
long_description_content_type='text/markdown',
long_description=open('README.md', 'rt').read(),
author='Iman Mousaei',
author_email='imanmousaei1379@gmail.com',
url='https://github.com/imanmousaei/coinexpy',
download_url=f'https://github.com/imanmousaei/coinexpy/archive/refs/tags/v{version}.tar.gz',
keywords=['coinex', 'api', 'wrapper', 'trade', 'crypto', 'bitcoin'],
install_requires=[
'urllib3'
],
classifiers=[
# "3 - Alpha", "4 - Beta" or "5 - Production/Stable"
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
| 32.783784 | 96 | 0.612531 | from setuptools import setup
version = '0.5.1'
setup(
name='coinexpy',
packages=['coinexpy'],
version=version,
license='MIT',
description='Python wrapper for Coinex APIs',
long_description_content_type='text/markdown',
long_description=open('README.md', 'rt').read(),
author='Iman Mousaei',
author_email='imanmousaei1379@gmail.com',
url='https://github.com/imanmousaei/coinexpy',
download_url=f'https://github.com/imanmousaei/coinexpy/archive/refs/tags/v{version}.tar.gz',
keywords=['coinex', 'api', 'wrapper', 'trade', 'crypto', 'bitcoin'],
install_requires=[
'urllib3'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
| true | true |
f71f1f30802fc7155f517414445cf1dadb679435 | 3,282 | py | Python | yt_dlp/extractor/beatport.py | olipfei/yt-dlp | 7879e79d11a2e5855167820518df49caf623fe48 | [
"Unlicense"
] | 11 | 2022-01-06T22:09:50.000Z | 2022-03-12T22:26:22.000Z | yt_dlp/extractor/beatport.py | olipfei/yt-dlp | 7879e79d11a2e5855167820518df49caf623fe48 | [
"Unlicense"
] | 4 | 2022-02-25T08:20:18.000Z | 2022-03-17T16:16:20.000Z | yt_dlp/extractor/beatport.py | olipfei/yt-dlp | 7879e79d11a2e5855167820518df49caf623fe48 | [
"Unlicense"
] | 3 | 2022-02-19T08:59:13.000Z | 2022-03-06T16:11:21.000Z | import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import int_or_none
class BeatportIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.|pro\.)?beatport\.com/track/(?P<display_id>[^/]+)/(?P<id>[0-9]+)'
_TESTS = [{
'url': 'https://beatport.com/track/synesthesia-original-mix/5379371',
'md5': 'b3c34d8639a2f6a7f734382358478887',
'info_dict': {
'id': '5379371',
'display_id': 'synesthesia-original-mix',
'ext': 'mp4',
'title': 'Froxic - Synesthesia (Original Mix)',
},
}, {
'url': 'https://beatport.com/track/love-and-war-original-mix/3756896',
'md5': 'e44c3025dfa38c6577fbaeb43da43514',
'info_dict': {
'id': '3756896',
'display_id': 'love-and-war-original-mix',
'ext': 'mp3',
'title': 'Wolfgang Gartner - Love & War (Original Mix)',
},
}, {
'url': 'https://beatport.com/track/birds-original-mix/4991738',
'md5': 'a1fd8e8046de3950fd039304c186c05f',
'info_dict': {
'id': '4991738',
'display_id': 'birds-original-mix',
'ext': 'mp4',
'title': "Tos, Middle Milk, Mumblin' Johnsson - Birds (Original Mix)",
}
}]
def _real_extract(self, url):
mobj = self._match_valid_url(url)
track_id = mobj.group('id')
display_id = mobj.group('display_id')
webpage = self._download_webpage(url, display_id)
playables = self._parse_json(
self._search_regex(
r'window\.Playables\s*=\s*({.+?});', webpage,
'playables info', flags=re.DOTALL),
track_id)
track = next(t for t in playables['tracks'] if t['id'] == int(track_id))
title = ', '.join((a['name'] for a in track['artists'])) + ' - ' + track['name']
if track['mix']:
title += ' (' + track['mix'] + ')'
formats = []
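        # track['preview'] maps a container name (mp3/mp4) to a short preview URL;
        # each entry below is exposed as an audio-only format.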
for ext, info in track['preview'].items():
if not info['url']:
continue
fmt = {
'url': info['url'],
'ext': ext,
'format_id': ext,
'vcodec': 'none',
}
if ext == 'mp3':
fmt['acodec'] = 'mp3'
fmt['abr'] = 96
fmt['asr'] = 44100
elif ext == 'mp4':
fmt['acodec'] = 'aac'
fmt['abr'] = 96
fmt['asr'] = 44100
formats.append(fmt)
self._sort_formats(formats)
images = []
for name, info in track['images'].items():
image_url = info.get('url')
if name == 'dynamic' or not image_url:
continue
image = {
'id': name,
'url': image_url,
'height': int_or_none(info.get('height')),
'width': int_or_none(info.get('width')),
}
images.append(image)
return {
'id': compat_str(track.get('id')) or track_id,
'display_id': track.get('slug') or display_id,
'title': title,
'formats': formats,
'thumbnails': images,
}
| 33.151515 | 101 | 0.481718 | import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import int_or_none
class BeatportIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.|pro\.)?beatport\.com/track/(?P<display_id>[^/]+)/(?P<id>[0-9]+)'
_TESTS = [{
'url': 'https://beatport.com/track/synesthesia-original-mix/5379371',
'md5': 'b3c34d8639a2f6a7f734382358478887',
'info_dict': {
'id': '5379371',
'display_id': 'synesthesia-original-mix',
'ext': 'mp4',
'title': 'Froxic - Synesthesia (Original Mix)',
},
}, {
'url': 'https://beatport.com/track/love-and-war-original-mix/3756896',
'md5': 'e44c3025dfa38c6577fbaeb43da43514',
'info_dict': {
'id': '3756896',
'display_id': 'love-and-war-original-mix',
'ext': 'mp3',
'title': 'Wolfgang Gartner - Love & War (Original Mix)',
},
}, {
'url': 'https://beatport.com/track/birds-original-mix/4991738',
'md5': 'a1fd8e8046de3950fd039304c186c05f',
'info_dict': {
'id': '4991738',
'display_id': 'birds-original-mix',
'ext': 'mp4',
'title': "Tos, Middle Milk, Mumblin' Johnsson - Birds (Original Mix)",
}
}]
def _real_extract(self, url):
mobj = self._match_valid_url(url)
track_id = mobj.group('id')
display_id = mobj.group('display_id')
webpage = self._download_webpage(url, display_id)
playables = self._parse_json(
self._search_regex(
r'window\.Playables\s*=\s*({.+?});', webpage,
'playables info', flags=re.DOTALL),
track_id)
track = next(t for t in playables['tracks'] if t['id'] == int(track_id))
title = ', '.join((a['name'] for a in track['artists'])) + ' - ' + track['name']
if track['mix']:
title += ' (' + track['mix'] + ')'
formats = []
for ext, info in track['preview'].items():
if not info['url']:
continue
fmt = {
'url': info['url'],
'ext': ext,
'format_id': ext,
'vcodec': 'none',
}
if ext == 'mp3':
fmt['acodec'] = 'mp3'
fmt['abr'] = 96
fmt['asr'] = 44100
elif ext == 'mp4':
fmt['acodec'] = 'aac'
fmt['abr'] = 96
fmt['asr'] = 44100
formats.append(fmt)
self._sort_formats(formats)
images = []
for name, info in track['images'].items():
image_url = info.get('url')
if name == 'dynamic' or not image_url:
continue
image = {
'id': name,
'url': image_url,
'height': int_or_none(info.get('height')),
'width': int_or_none(info.get('width')),
}
images.append(image)
return {
'id': compat_str(track.get('id')) or track_id,
'display_id': track.get('slug') or display_id,
'title': title,
'formats': formats,
'thumbnails': images,
}
| true | true |
f71f1f5ea6c652ba83e9c4487fc915c47f98899c | 8,304 | py | Python | core/controllers/collection_editor.py | jlau323/oppia | 37438a2c9bf7e66892fb9a6a93a1fe4ca7a82691 | [
"Apache-2.0"
] | 2 | 2021-04-08T01:06:08.000Z | 2021-06-02T08:20:13.000Z | core/controllers/collection_editor.py | gitter-badger/oppia | 7d8e659264582d7ce74bc6c139e597b82bca0e04 | [
"Apache-2.0"
] | 1 | 2020-05-27T06:08:17.000Z | 2020-05-27T06:08:17.000Z | core/controllers/collection_editor.py | gitter-badger/oppia | 7d8e659264582d7ce74bc6c139e597b82bca0e04 | [
"Apache-2.0"
] | 1 | 2020-11-05T12:26:10.000Z | 2020-11-05T12:26:10.000Z | # coding: utf-8
# Copyright 2015 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the collections editor."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import base64
from core.controllers import acl_decorators
from core.controllers import base
from core.domain import collection_services
from core.domain import rights_manager
from core.domain import search_services
from core.domain import summary_services
from core.platform import models
import feconf
current_user_services = models.Registry.import_current_user_services()
def _require_valid_version(version_from_payload, collection_version):
"""Check that the payload version matches the given collection version."""
if version_from_payload is None:
raise base.BaseHandler.InvalidInputException(
'Invalid POST request: a version must be specified.')
if version_from_payload != collection_version:
raise base.BaseHandler.InvalidInputException(
'Trying to update version %s of collection from version %s, '
'which is too old. Please reload the page and try again.'
% (collection_version, version_from_payload))
class CollectionEditorHandler(base.BaseHandler):
"""Base class for all handlers for the collection editor page."""
pass
class CollectionEditorPage(CollectionEditorHandler):
"""The editor page for a single collection."""
@acl_decorators.can_edit_collection
def get(self, _):
"""Handles GET requests."""
self.render_template('collection-editor-page.mainpage.html')
class EditableCollectionDataHandler(CollectionEditorHandler):
"""A data handler for collections which supports writing."""
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_edit_collection
def get(self, collection_id):
"""Populates the data on the individual collection page."""
collection_dict = (
summary_services.get_learner_collection_dict_by_id(
collection_id, self.user,
allow_invalid_explorations=True))
self.values.update({
'collection': collection_dict
})
self.render_json(self.values)
@acl_decorators.can_edit_collection
def put(self, collection_id):
"""Updates properties of the given collection."""
collection = collection_services.get_collection_by_id(collection_id)
version = self.payload.get('version')
_require_valid_version(version, collection.version)
commit_message = self.payload.get('commit_message')
if (commit_message is not None and
len(commit_message) > feconf.MAX_COMMIT_MESSAGE_LENGTH):
raise self.InvalidInputException(
'Commit messages must be at most %s characters long.'
% feconf.MAX_COMMIT_MESSAGE_LENGTH)
change_list = self.payload.get('change_list')
collection_services.update_collection(
self.user_id, collection_id, change_list, commit_message)
collection_dict = (
summary_services.get_learner_collection_dict_by_id(
collection_id, self.user,
allow_invalid_explorations=True))
# Send the updated collection back to the frontend.
self.values.update({
'collection': collection_dict
})
self.render_json(self.values)
class CollectionRightsHandler(CollectionEditorHandler):
"""Handles management of collection editing rights."""
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_edit_collection
def get(self, collection_id):
"""Gets the editing rights for the given collection.
Args:
collection_id: str. ID for the collection.
"""
(collection, collection_rights) = (
collection_services.get_collection_and_collection_rights_by_id(
collection_id))
self.values.update({
'can_edit': True,
'can_unpublish': rights_manager.check_can_unpublish_activity(
self.user, collection_rights),
'collection_id': collection.id,
'is_private': rights_manager.is_collection_private(collection_id),
'owner_names': rights_manager.get_collection_owner_names(
collection_id)
})
self.render_json(self.values)
class CollectionPublishHandler(base.BaseHandler):
"""Handles the publication of the given collection."""
@acl_decorators.can_publish_collection
def put(self, collection_id):
"""Publishes the given collection."""
collection = collection_services.get_collection_by_id(collection_id)
version = self.payload.get('version')
_require_valid_version(version, collection.version)
collection.validate(strict=True)
collection_services.validate_exps_in_collection_are_public(
collection)
collection_services.publish_collection_and_update_user_profiles(
self.user, collection_id)
collection_services.index_collections_given_ids([
collection_id])
collection_rights = rights_manager.get_collection_rights(
collection_id, strict=False)
self.values.update({
'can_edit': True,
'can_unpublish': rights_manager.check_can_unpublish_activity(
self.user, collection_rights),
'collection_id': collection.id,
'is_private': rights_manager.is_collection_private(collection_id),
'owner_names': rights_manager.get_collection_owner_names(
collection_id)
})
self.render_json(self.values)
class CollectionUnpublishHandler(base.BaseHandler):
"""Handles the unpublication of the given collection."""
@acl_decorators.can_unpublish_collection
def put(self, collection_id):
"""Unpublishes the given collection."""
collection = collection_services.get_collection_by_id(collection_id)
version = self.payload.get('version')
_require_valid_version(version, collection.version)
rights_manager.unpublish_collection(self.user, collection_id)
search_services.delete_collections_from_search_index([
collection_id])
collection_rights = rights_manager.get_collection_rights(
collection_id, strict=False)
self.values.update({
'can_edit': True,
'can_unpublish': rights_manager.check_can_unpublish_activity(
self.user, collection_rights),
'collection_id': collection.id,
'is_private': rights_manager.is_collection_private(collection_id),
'owner_names': rights_manager.get_collection_owner_names(
collection_id)
})
self.render_json(self.values)
class ExplorationMetadataSearchHandler(base.BaseHandler):
"""Provides data for exploration search."""
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.open_access
def get(self):
"""Handles GET requests."""
query_string = base64.b64decode(self.request.get('q'))
search_cursor = self.request.get('cursor', None)
collection_node_metadata_list, new_search_cursor = (
summary_services.get_exp_metadata_dicts_matching_query(
query_string, search_cursor, self.user))
self.values.update({
'collection_node_metadata_list': collection_node_metadata_list,
'search_cursor': new_search_cursor,
})
self.render_json(self.values)
| 35.793103 | 78 | 0.698579 |
from __future__ import absolute_import
from __future__ import unicode_literals
import base64
from core.controllers import acl_decorators
from core.controllers import base
from core.domain import collection_services
from core.domain import rights_manager
from core.domain import search_services
from core.domain import summary_services
from core.platform import models
import feconf
current_user_services = models.Registry.import_current_user_services()
def _require_valid_version(version_from_payload, collection_version):
if version_from_payload is None:
raise base.BaseHandler.InvalidInputException(
'Invalid POST request: a version must be specified.')
if version_from_payload != collection_version:
raise base.BaseHandler.InvalidInputException(
'Trying to update version %s of collection from version %s, '
'which is too old. Please reload the page and try again.'
% (collection_version, version_from_payload))
class CollectionEditorHandler(base.BaseHandler):
pass
class CollectionEditorPage(CollectionEditorHandler):
@acl_decorators.can_edit_collection
def get(self, _):
self.render_template('collection-editor-page.mainpage.html')
class EditableCollectionDataHandler(CollectionEditorHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_edit_collection
def get(self, collection_id):
collection_dict = (
summary_services.get_learner_collection_dict_by_id(
collection_id, self.user,
allow_invalid_explorations=True))
self.values.update({
'collection': collection_dict
})
self.render_json(self.values)
@acl_decorators.can_edit_collection
def put(self, collection_id):
collection = collection_services.get_collection_by_id(collection_id)
version = self.payload.get('version')
_require_valid_version(version, collection.version)
commit_message = self.payload.get('commit_message')
if (commit_message is not None and
len(commit_message) > feconf.MAX_COMMIT_MESSAGE_LENGTH):
raise self.InvalidInputException(
'Commit messages must be at most %s characters long.'
% feconf.MAX_COMMIT_MESSAGE_LENGTH)
change_list = self.payload.get('change_list')
collection_services.update_collection(
self.user_id, collection_id, change_list, commit_message)
collection_dict = (
summary_services.get_learner_collection_dict_by_id(
collection_id, self.user,
allow_invalid_explorations=True))
self.values.update({
'collection': collection_dict
})
self.render_json(self.values)
class CollectionRightsHandler(CollectionEditorHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_edit_collection
def get(self, collection_id):
(collection, collection_rights) = (
collection_services.get_collection_and_collection_rights_by_id(
collection_id))
self.values.update({
'can_edit': True,
'can_unpublish': rights_manager.check_can_unpublish_activity(
self.user, collection_rights),
'collection_id': collection.id,
'is_private': rights_manager.is_collection_private(collection_id),
'owner_names': rights_manager.get_collection_owner_names(
collection_id)
})
self.render_json(self.values)
class CollectionPublishHandler(base.BaseHandler):
@acl_decorators.can_publish_collection
def put(self, collection_id):
collection = collection_services.get_collection_by_id(collection_id)
version = self.payload.get('version')
_require_valid_version(version, collection.version)
collection.validate(strict=True)
collection_services.validate_exps_in_collection_are_public(
collection)
collection_services.publish_collection_and_update_user_profiles(
self.user, collection_id)
collection_services.index_collections_given_ids([
collection_id])
collection_rights = rights_manager.get_collection_rights(
collection_id, strict=False)
self.values.update({
'can_edit': True,
'can_unpublish': rights_manager.check_can_unpublish_activity(
self.user, collection_rights),
'collection_id': collection.id,
'is_private': rights_manager.is_collection_private(collection_id),
'owner_names': rights_manager.get_collection_owner_names(
collection_id)
})
self.render_json(self.values)
class CollectionUnpublishHandler(base.BaseHandler):
@acl_decorators.can_unpublish_collection
def put(self, collection_id):
collection = collection_services.get_collection_by_id(collection_id)
version = self.payload.get('version')
_require_valid_version(version, collection.version)
rights_manager.unpublish_collection(self.user, collection_id)
search_services.delete_collections_from_search_index([
collection_id])
collection_rights = rights_manager.get_collection_rights(
collection_id, strict=False)
self.values.update({
'can_edit': True,
'can_unpublish': rights_manager.check_can_unpublish_activity(
self.user, collection_rights),
'collection_id': collection.id,
'is_private': rights_manager.is_collection_private(collection_id),
'owner_names': rights_manager.get_collection_owner_names(
collection_id)
})
self.render_json(self.values)
class ExplorationMetadataSearchHandler(base.BaseHandler):
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.open_access
def get(self):
query_string = base64.b64decode(self.request.get('q'))
search_cursor = self.request.get('cursor', None)
collection_node_metadata_list, new_search_cursor = (
summary_services.get_exp_metadata_dicts_matching_query(
query_string, search_cursor, self.user))
self.values.update({
'collection_node_metadata_list': collection_node_metadata_list,
'search_cursor': new_search_cursor,
})
self.render_json(self.values)
| true | true |
f71f1f82deeb1ff237e916ec83b6ae59d364cc6e | 2,310 | py | Python | demoapp/views.py | Innersystemm/mmad | dde4d72fb6d2471f9637ddb3116535a32b6909cd | [
"MIT"
] | null | null | null | demoapp/views.py | Innersystemm/mmad | dde4d72fb6d2471f9637ddb3116535a32b6909cd | [
"MIT"
] | null | null | null | demoapp/views.py | Innersystemm/mmad | dde4d72fb6d2471f9637ddb3116535a32b6909cd | [
"MIT"
] | null | null | null | from datetime import datetime
from demoapp.sign_classifier import Sign
from demoapp.sign_db import SignDB
from demoapp.sign_validator import SignValidator
from django.http import HttpRequest
from django.shortcuts import render
from .forms import *
def index(request):
assert isinstance(request, HttpRequest)
return render(
request,
'index.html',
{
'title': 'Home Page',
'date': datetime.now(),
}
)
def auth(request):
assert isinstance(request, HttpRequest)
    # create the form instance (django forms); it contains two fields: the user id and the feature string
auth_form = AuthForm()
status = ''
    # this function checks whether a user with the given id is registered
    # and, if such a user exists, classifies them by the received feature
    # vector using the knn algorithm
    # the function returns true if the given user exists and the received
    # feature vector was classified successfully, otherwise it returns false
if 'signs' in request.POST:
result, status = Sign.process_sign(request.POST['user_id'], request.POST['signs'])
    # Render the html page, passing the authorization form
    # and the current authorization status as template parameters
return render(
request,
'auth.html',
{
'form': auth_form,
'status': status,
})
def create_account(request):
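    # Registration view: expects a user id and three sign (feature) vectors in the
    # POST data, validates them, and adds the new user to the sign database.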
create_account_form = CreateAccountForm()
msg = 'Введите параметры'
if 'userId' in request.POST \
and 'signVector1' in request.POST \
and 'signVector2' in request.POST \
and 'signVector3' in request.POST:
user_id = request.POST['userId']
signs = [request.POST['signVector1'], request.POST['signVector2'], request.POST['signVector3']]
result, status = SignValidator.is_sign_valid(signs, sign_len=4)
if result is True:
proceed_user_id, proceed_signs = Sign.cast_data_to_numeric_val(user_id=user_id, signs=signs)
status, msg = SignDB.add_user(proceed_user_id, proceed_signs)
return render(
request,
'create_account.html',
{
'form': create_account_form,
'status': msg
}
)
| 32.083333 | 105 | 0.666667 | from datetime import datetime
from demoapp.sign_classifier import Sign
from demoapp.sign_db import SignDB
from demoapp.sign_validator import SignValidator
from django.http import HttpRequest
from django.shortcuts import render
from .forms import *
def index(request):
assert isinstance(request, HttpRequest)
return render(
request,
'index.html',
{
'title': 'Home Page',
'date': datetime.now(),
}
)
def auth(request):
assert isinstance(request, HttpRequest)
auth_form = AuthForm()
status = ''
if 'signs' in request.POST:
result, status = Sign.process_sign(request.POST['user_id'], request.POST['signs'])
return render(
request,
'auth.html',
{
'form': auth_form,
'status': status,
})
def create_account(request):
create_account_form = CreateAccountForm()
msg = 'Введите параметры'
if 'userId' in request.POST \
and 'signVector1' in request.POST \
and 'signVector2' in request.POST \
and 'signVector3' in request.POST:
user_id = request.POST['userId']
signs = [request.POST['signVector1'], request.POST['signVector2'], request.POST['signVector3']]
result, status = SignValidator.is_sign_valid(signs, sign_len=4)
if result is True:
proceed_user_id, proceed_signs = Sign.cast_data_to_numeric_val(user_id=user_id, signs=signs)
status, msg = SignDB.add_user(proceed_user_id, proceed_signs)
return render(
request,
'create_account.html',
{
'form': create_account_form,
'status': msg
}
)
| true | true |
f71f200d67c6267ab2d95f6a61fffdcb40c31d6a | 6,201 | py | Python | tutorials_templates/build.py | dataloop-ai/dtlpy-documentation | fe607a084fa660328ae5ab29ba8e05a4627aad51 | [
"MIT"
] | 3 | 2022-01-07T20:33:49.000Z | 2022-03-22T12:41:30.000Z | tutorials_templates/build.py | dataloop-ai/dtlpy-documentation | fe607a084fa660328ae5ab29ba8e05a4627aad51 | [
"MIT"
] | null | null | null | tutorials_templates/build.py | dataloop-ai/dtlpy-documentation | fe607a084fa660328ae5ab29ba8e05a4627aad51 | [
"MIT"
] | 3 | 2021-12-29T13:11:30.000Z | 2022-03-22T12:25:50.000Z | import json
import importlib.util
import inspect
import os
LINE_HEADER = '<func:'
TEMPLATES_PATH = 'tutorials_templates'
TUTORIALS_PATH = 'tutorials'
NOTEBOOK_TEMPLATE = {"cells": [],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.7"
}
},
"nbformat": 4,
"nbformat_minor": 4}
MD_CELL_TEMPLATE = {
"cell_type": "markdown",
"metadata": {},
"source": []
}
CODE_CELL_TEMPLATE = {
"cell_type": "code",
"execution_count": 0,
"metadata": {},
"outputs": [],
"source": []
}
def _build_md_block(func_string):
# ignore 0 def line and the """
source = list()
for line in func_string[2:-1]:
source.append(line[4:].rstrip() + ' \n') # remove 4 spaces at the beginning
#
# # remove the "def" line
# func_string = func_string[1:]
# source = list()
# for line in func_string:
# source.append(line) # remove
# source = source[1:-1] # remove first and last line (triple quotes)
return source
def build_notebook(skeleton_filepath):
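    # Build a Jupyter notebook (chapter.ipynb) from skeleton.json: markdown cells come
    # from functions in the sibling mds.py, code cells from functions in scripts.py.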
mds_filepath = os.path.join(os.path.dirname(skeleton_filepath), 'mds.py')
scripts_filepath = os.path.join(os.path.dirname(skeleton_filepath), 'scripts.py')
####
mds_spec = importlib.util.spec_from_file_location('mds', mds_filepath)
mds_module = importlib.util.module_from_spec(mds_spec)
mds_spec.loader.exec_module(mds_module)
####
scripts_spec = importlib.util.spec_from_file_location('scripts', scripts_filepath)
scripts_module = importlib.util.module_from_spec(scripts_spec)
scripts_spec.loader.exec_module(scripts_module)
with open(skeleton_filepath, 'r') as f:
skeleton = json.load(f)
cells = list()
for cell_def in skeleton:
if cell_def['type'] == 'md':
cell = MD_CELL_TEMPLATE.copy()
func_name = cell_def['name']
func = getattr(mds_module, func_name)
func_string, _ = inspect.getsourcelines(func)
source = _build_md_block(func_string)
# source = source# adding double space for new line
cell['source'] = source
elif cell_def['type'] == 'code':
cell = CODE_CELL_TEMPLATE.copy()
func_name = cell_def['name']
func = getattr(scripts_module, func_name)
func_string, _ = inspect.getsourcelines(func)
# remove the "def" line
func_string = func_string[1:]
source = [l[4:] for l in func_string]
cell['source'] = source
else:
raise ValueError('unknown cell type {!r}'.format(cell_def['type']))
cells.append(cell)
NOTEBOOK_TEMPLATE['cells'] = cells
notebook_filepath = os.path.dirname(skeleton_filepath).replace(TEMPLATES_PATH, TUTORIALS_PATH)
notebook_filepath = os.path.join(notebook_filepath, 'chapter.ipynb')
os.makedirs(os.path.dirname(notebook_filepath), exist_ok=True)
with open(notebook_filepath, 'w', encoding='UTF-8') as f:
json.dump(NOTEBOOK_TEMPLATE, f)
def build_md_file(skeleton_filepath):
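    # Same sources as build_notebook, but rendered into a single chapter.md file,
    # with the code functions emitted as fenced python blocks.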
mds_filepath = os.path.join(os.path.dirname(skeleton_filepath), 'mds.py')
scripts_filepath = os.path.join(os.path.dirname(skeleton_filepath), 'scripts.py')
####
mds_spec = importlib.util.spec_from_file_location('mds', mds_filepath)
mds_module = importlib.util.module_from_spec(mds_spec)
mds_spec.loader.exec_module(mds_module)
####
scripts_spec = importlib.util.spec_from_file_location('scripts', scripts_filepath)
scripts_module = importlib.util.module_from_spec(scripts_spec)
scripts_spec.loader.exec_module(scripts_module)
with open(skeleton_filepath, 'r') as f:
skeleton = json.load(f)
lines = list()
for cell_def in skeleton:
if cell_def['type'] == 'md':
func_name = cell_def['name']
func = getattr(mds_module, func_name)
func_string, _ = inspect.getsourcelines(func)
lines.extend(_build_md_block(func_string))
elif cell_def['type'] == 'code':
func_name = cell_def['name']
func = getattr(scripts_module, func_name)
func_string, _ = inspect.getsourcelines(func)
lines.append('\n```python\n')
# ignore 0 def line and the """
for line in func_string[1:]:
lines.append(line[4:]) # remove spaces at the beginning
lines.append('```\n')
else:
raise ValueError('unknown cell type {!r}'.format(cell_def['type']))
md_filepath = os.path.dirname(skeleton_filepath).replace(TEMPLATES_PATH, TUTORIALS_PATH)
md_filepath = os.path.join(md_filepath, 'chapter.md')
os.makedirs(os.path.dirname(md_filepath), exist_ok=True)
with open(md_filepath, 'w', encoding='UTF-8') as f:
f.writelines(lines)
def main():
for path, subdirs, files in os.walk(TEMPLATES_PATH):
for filename in files:
if filename == 'skeleton.json':
print('Preparing {!r} ...'.format(path))
# skeleton_filepath = "tutorials_templates/faas/multiple_functions/skeleton.json"
skeleton_filepath = os.path.join(path, filename)
build_notebook(skeleton_filepath=skeleton_filepath)
build_md_file(skeleton_filepath=skeleton_filepath)
print('Done!')
if __name__ == "__main__":
main()
| 37.810976 | 98 | 0.584583 | import json
import importlib.util
import inspect
import os
LINE_HEADER = '<func:'
TEMPLATES_PATH = 'tutorials_templates'
TUTORIALS_PATH = 'tutorials'
NOTEBOOK_TEMPLATE = {"cells": [],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.7"
}
},
"nbformat": 4,
"nbformat_minor": 4}
MD_CELL_TEMPLATE = {
"cell_type": "markdown",
"metadata": {},
"source": []
}
CODE_CELL_TEMPLATE = {
"cell_type": "code",
"execution_count": 0,
"metadata": {},
"outputs": [],
"source": []
}
def _build_md_block(func_string):
source = list()
for line in func_string[2:-1]:
source.append(line[4:].rstrip() + ' \n') # remove 4 spaces at the beginning
#
# # remove the "def" line
# func_string = func_string[1:]
# source = list()
# for line in func_string:
# source.append(line) # remove
# source = source[1:-1] # remove first and last line (triple quotes)
return source
def build_notebook(skeleton_filepath):
mds_filepath = os.path.join(os.path.dirname(skeleton_filepath), 'mds.py')
scripts_filepath = os.path.join(os.path.dirname(skeleton_filepath), 'scripts.py')
####
mds_spec = importlib.util.spec_from_file_location('mds', mds_filepath)
mds_module = importlib.util.module_from_spec(mds_spec)
mds_spec.loader.exec_module(mds_module)
####
scripts_spec = importlib.util.spec_from_file_location('scripts', scripts_filepath)
scripts_module = importlib.util.module_from_spec(scripts_spec)
scripts_spec.loader.exec_module(scripts_module)
with open(skeleton_filepath, 'r') as f:
skeleton = json.load(f)
cells = list()
for cell_def in skeleton:
if cell_def['type'] == 'md':
cell = MD_CELL_TEMPLATE.copy()
func_name = cell_def['name']
func = getattr(mds_module, func_name)
func_string, _ = inspect.getsourcelines(func)
source = _build_md_block(func_string)
# source = source# adding double space for new line
cell['source'] = source
elif cell_def['type'] == 'code':
cell = CODE_CELL_TEMPLATE.copy()
func_name = cell_def['name']
func = getattr(scripts_module, func_name)
func_string, _ = inspect.getsourcelines(func)
# remove the "def" line
func_string = func_string[1:]
source = [l[4:] for l in func_string]
cell['source'] = source
else:
raise ValueError('unknown cell type {!r}'.format(cell_def['type']))
cells.append(cell)
NOTEBOOK_TEMPLATE['cells'] = cells
notebook_filepath = os.path.dirname(skeleton_filepath).replace(TEMPLATES_PATH, TUTORIALS_PATH)
notebook_filepath = os.path.join(notebook_filepath, 'chapter.ipynb')
os.makedirs(os.path.dirname(notebook_filepath), exist_ok=True)
with open(notebook_filepath, 'w', encoding='UTF-8') as f:
json.dump(NOTEBOOK_TEMPLATE, f)
def build_md_file(skeleton_filepath):
mds_filepath = os.path.join(os.path.dirname(skeleton_filepath), 'mds.py')
scripts_filepath = os.path.join(os.path.dirname(skeleton_filepath), 'scripts.py')
####
mds_spec = importlib.util.spec_from_file_location('mds', mds_filepath)
mds_module = importlib.util.module_from_spec(mds_spec)
mds_spec.loader.exec_module(mds_module)
####
scripts_spec = importlib.util.spec_from_file_location('scripts', scripts_filepath)
scripts_module = importlib.util.module_from_spec(scripts_spec)
scripts_spec.loader.exec_module(scripts_module)
with open(skeleton_filepath, 'r') as f:
skeleton = json.load(f)
lines = list()
for cell_def in skeleton:
if cell_def['type'] == 'md':
func_name = cell_def['name']
func = getattr(mds_module, func_name)
func_string, _ = inspect.getsourcelines(func)
lines.extend(_build_md_block(func_string))
elif cell_def['type'] == 'code':
func_name = cell_def['name']
func = getattr(scripts_module, func_name)
func_string, _ = inspect.getsourcelines(func)
lines.append('\n```python\n')
# ignore 0 def line and the """
for line in func_string[1:]:
lines.append(line[4:])
lines.append('```\n')
else:
raise ValueError('unknown cell type {!r}'.format(cell_def['type']))
md_filepath = os.path.dirname(skeleton_filepath).replace(TEMPLATES_PATH, TUTORIALS_PATH)
md_filepath = os.path.join(md_filepath, 'chapter.md')
os.makedirs(os.path.dirname(md_filepath), exist_ok=True)
with open(md_filepath, 'w', encoding='UTF-8') as f:
f.writelines(lines)
def main():
for path, subdirs, files in os.walk(TEMPLATES_PATH):
for filename in files:
if filename == 'skeleton.json':
print('Preparing {!r} ...'.format(path))
skeleton_filepath = os.path.join(path, filename)
build_notebook(skeleton_filepath=skeleton_filepath)
build_md_file(skeleton_filepath=skeleton_filepath)
print('Done!')
if __name__ == "__main__":
main()
| true | true |
f71f205dda870b1a5da937518b795fa9c5dde373 | 2,306 | py | Python | tests/functional/tests/security/conftest.py | mahsaama/MRUPolicyInOpenCAS | bb97122f31dd64f2fb7d2be47057cf0721d109ab | [
"BSD-3-Clause-Clear"
] | 3 | 2021-07-29T08:39:03.000Z | 2022-02-25T10:00:36.000Z | tests/functional/tests/security/conftest.py | josehu07/ocf-mf | 3e54f9a1de24ec7f44869e7f2c5a6883321d32bd | [
"BSD-3-Clause-Clear"
] | null | null | null | tests/functional/tests/security/conftest.py | josehu07/ocf-mf | 3e54f9a1de24ec7f44869e7f2c5a6883321d32bd | [
"BSD-3-Clause-Clear"
] | null | null | null | #
# Copyright(c) 2019-2020 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
import os
import sys
from ctypes import (
c_uint64,
c_uint32,
c_uint16,
c_int
)
from tests.utils.random import RandomStringGenerator, RandomGenerator, DefaultRanges, Range
from pyocf.types.cache import CacheMode, EvictionPolicy, MetadataLayout, PromotionPolicy
from pyocf.types.shared import CacheLineSize
import pytest
sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
def enum_min(enum):
return list(enum)[0].value
def enum_max(enum):
return list(enum)[-1].value
def enum_range(enum):
return Range(enum_min(enum), enum_max(enum))
@pytest.fixture(params=RandomGenerator(DefaultRanges.UINT16))
def c_uint16_randomize(request):
return request.param
@pytest.fixture(params=RandomGenerator(DefaultRanges.UINT32))
def c_uint32_randomize(request):
return request.param
@pytest.fixture(params=RandomGenerator(DefaultRanges.UINT64))
def c_uint64_randomize(request):
return request.param
@pytest.fixture(params=RandomGenerator(DefaultRanges.INT))
def c_int_randomize(request):
return request.param
@pytest.fixture(params=RandomGenerator(DefaultRanges.INT))
def c_int_sector_randomize(request):
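    # round the random value down to a multiple of 512 (a whole number of sectors)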
return request.param // 512 * 512
@pytest.fixture(params=RandomStringGenerator())
def string_randomize(request):
return request.param
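# The not_*_randomize fixtures below yield random uint32 values guaranteed to fall
# outside the corresponding enum's valid range, for negative (invalid-input) testing.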
@pytest.fixture(
params=RandomGenerator(DefaultRanges.UINT32).exclude_range(enum_range(CacheMode))
)
def not_cache_mode_randomize(request):
return request.param
@pytest.fixture(
params=RandomGenerator(DefaultRanges.UINT32).exclude_range(enum_range(CacheLineSize))
)
def not_cache_line_size_randomize(request):
return request.param
@pytest.fixture(
params=RandomGenerator(DefaultRanges.UINT32).exclude_range(enum_range(EvictionPolicy))
)
def not_eviction_policy_randomize(request):
return request.param
@pytest.fixture(
params=RandomGenerator(DefaultRanges.UINT32).exclude_range(enum_range(PromotionPolicy))
)
def not_promotion_policy_randomize(request):
return request.param
@pytest.fixture(
params=RandomGenerator(DefaultRanges.UINT32).exclude_range(enum_range(MetadataLayout))
)
def not_metadata_layout_randomize(request):
return request.param
| 23.292929 | 91 | 0.79098 |
import os
import sys
from ctypes import (
c_uint64,
c_uint32,
c_uint16,
c_int
)
from tests.utils.random import RandomStringGenerator, RandomGenerator, DefaultRanges, Range
from pyocf.types.cache import CacheMode, EvictionPolicy, MetadataLayout, PromotionPolicy
from pyocf.types.shared import CacheLineSize
import pytest
sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
def enum_min(enum):
return list(enum)[0].value
def enum_max(enum):
return list(enum)[-1].value
def enum_range(enum):
return Range(enum_min(enum), enum_max(enum))
@pytest.fixture(params=RandomGenerator(DefaultRanges.UINT16))
def c_uint16_randomize(request):
return request.param
@pytest.fixture(params=RandomGenerator(DefaultRanges.UINT32))
def c_uint32_randomize(request):
return request.param
@pytest.fixture(params=RandomGenerator(DefaultRanges.UINT64))
def c_uint64_randomize(request):
return request.param
@pytest.fixture(params=RandomGenerator(DefaultRanges.INT))
def c_int_randomize(request):
return request.param
@pytest.fixture(params=RandomGenerator(DefaultRanges.INT))
def c_int_sector_randomize(request):
return request.param // 512 * 512
@pytest.fixture(params=RandomStringGenerator())
def string_randomize(request):
return request.param
@pytest.fixture(
params=RandomGenerator(DefaultRanges.UINT32).exclude_range(enum_range(CacheMode))
)
def not_cache_mode_randomize(request):
return request.param
@pytest.fixture(
params=RandomGenerator(DefaultRanges.UINT32).exclude_range(enum_range(CacheLineSize))
)
def not_cache_line_size_randomize(request):
return request.param
@pytest.fixture(
params=RandomGenerator(DefaultRanges.UINT32).exclude_range(enum_range(EvictionPolicy))
)
def not_eviction_policy_randomize(request):
return request.param
@pytest.fixture(
params=RandomGenerator(DefaultRanges.UINT32).exclude_range(enum_range(PromotionPolicy))
)
def not_promotion_policy_randomize(request):
return request.param
@pytest.fixture(
params=RandomGenerator(DefaultRanges.UINT32).exclude_range(enum_range(MetadataLayout))
)
def not_metadata_layout_randomize(request):
return request.param
| true | true |
f71f21c0e694d95be7a898ee0e483f58d5892616 | 4,759 | py | Python | docs/conf.py | compas-dev/compas_occ | 9ff1bf3bdab6c750930e9864edcfdc8afee255ab | [
"MIT"
] | 6 | 2021-03-23T09:31:02.000Z | 2022-02-14T13:13:43.000Z | docs/conf.py | compas-dev/compas_occ | 9ff1bf3bdab6c750930e9864edcfdc8afee255ab | [
"MIT"
] | 6 | 2021-03-23T15:03:14.000Z | 2022-01-18T07:43:36.000Z | docs/conf.py | compas-dev/compas_occ | 9ff1bf3bdab6c750930e9864edcfdc8afee255ab | [
"MIT"
] | 4 | 2021-03-23T14:51:31.000Z | 2022-03-29T07:48:42.000Z | # flake8: noqa
# -*- coding: utf-8 -*-
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = "1.0"
import sys
import os
import inspect
import importlib
import sphinx_compas_theme
from sphinx.ext.napoleon.docstring import NumpyDocstring
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../src'))
# -- General configuration ------------------------------------------------
project = "COMPAS OCC"
copyright = "Block Research Group - ETH Zurich"
author = "tom van mele"
release = "0.3.3"
version = ".".join(release.split(".")[0:2])
master_doc = "index"
source_suffix = [".rst", ]
templates_path = sphinx_compas_theme.get_autosummary_templates_path()
exclude_patterns = []
pygments_style = "sphinx"
show_authors = True
add_module_names = True
language = None
# -- Extension configuration ------------------------------------------------
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
"sphinx.ext.coverage",
"sphinx.ext.linkcode",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
"sphinx.ext.githubpages",
# "matplotlib.sphinxext.plot_directive",
]
# autodoc options
autodoc_mock_imports = [
"System",
"clr",
"Eto",
"Rhino",
"Grasshopper",
"scriptcontext",
"rhinoscriptsyntax",
"bpy",
"bmesh",
"mathutils"
]
autodoc_default_options = {
"undoc-members": True,
"show-inheritance": True,
}
autodoc_member_order = "alphabetical"
autoclass_content = "class"
def skip(app, what, name, obj, would_skip, options):
if name.startswith('_'):
return True
return would_skip
def setup(app):
app.connect("autodoc-skip-member", skip)
# autosummary options
autosummary_generate = True
# napoleon options
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = False
napoleon_use_rtype = False
# plot options
# plot_html_show_source_link = False
# plot_html_show_formats = False
# docstring sections
def parse_attributes_section(self, section):
return self._format_fields("Attributes", self._consume_fields())
NumpyDocstring._parse_attributes_section = parse_attributes_section
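# Monkey-patch NumpyDocstring so that "Attributes" sections are parsed by the custom
# handler above and rendered like ordinary field lists.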
def patched_parse(self):
self._sections["attributes"] = self._parse_attributes_section
self._unpatched_parse()
NumpyDocstring._unpatched_parse = NumpyDocstring._parse
NumpyDocstring._parse = patched_parse
# intersphinx options
intersphinx_mapping = {
"python": ("https://docs.python.org/", None),
"compas": ("https://compas.dev/compas/latest/", None),
}
# linkcode
def linkcode_resolve(domain, info):
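    # sphinx.ext.linkcode hook: map a documented object to its location in the
    # compas_occ sources on GitHub so the docs can render [source] links.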
if domain != 'py':
return None
if not info['module']:
return None
if not info['fullname']:
return None
package = info['module'].split('.')[0]
if not package.startswith('compas_occ'):
return None
module = importlib.import_module(info['module'])
parts = info['fullname'].split('.')
if len(parts) == 1:
obj = getattr(module, info['fullname'])
filename = inspect.getmodule(obj).__name__.replace('.', '/')
lineno = inspect.getsourcelines(obj)[1]
elif len(parts) == 2:
obj_name, attr_name = parts
obj = getattr(module, obj_name)
attr = getattr(obj, attr_name)
if inspect.isfunction(attr):
filename = inspect.getmodule(obj).__name__.replace('.', '/')
lineno = inspect.getsourcelines(attr)[1]
else:
return None
else:
return None
return f"https://github.com/compas-dev/compas_occ/blob/main/src/{filename}.py#L{lineno}"
# extlinks
extlinks = {}
# -- Options for HTML output ----------------------------------------------
html_theme = "compaspkg"
html_theme_path = sphinx_compas_theme.get_html_theme_path()
html_theme_options = {
"package_name" : "compas_occ",
"package_title" : project,
"package_version" : release,
"package_author" : "compas-dev",
"package_docs" : "https://compas.dev/compas_occ/",
"package_repo" : "https://github.com/compas-dev/compas_occ",
"package_old_versions_txt": "https://compas.dev/compas_occ/doc_versions.txt"
}
html_context = {}
html_static_path = []
html_extra_path = []
html_last_updated_fmt = ""
html_copy_source = False
html_show_sourcelink = False
html_permalinks = False
html_permalinks_icon = ""
html_compact_lists = True
| 24.786458 | 92 | 0.677663 |
import sys
import os
import inspect
import importlib
import sphinx_compas_theme
from sphinx.ext.napoleon.docstring import NumpyDocstring
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../src'))
project = "COMPAS OCC"
copyright = "Block Research Group - ETH Zurich"
author = "tom van mele"
release = "0.3.3"
version = ".".join(release.split(".")[0:2])
master_doc = "index"
source_suffix = [".rst", ]
templates_path = sphinx_compas_theme.get_autosummary_templates_path()
exclude_patterns = []
pygments_style = "sphinx"
show_authors = True
add_module_names = True
language = None
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.doctest",
"sphinx.ext.coverage",
"sphinx.ext.linkcode",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
"sphinx.ext.githubpages",
]
autodoc_mock_imports = [
"System",
"clr",
"Eto",
"Rhino",
"Grasshopper",
"scriptcontext",
"rhinoscriptsyntax",
"bpy",
"bmesh",
"mathutils"
]
autodoc_default_options = {
"undoc-members": True,
"show-inheritance": True,
}
autodoc_member_order = "alphabetical"
autoclass_content = "class"
def skip(app, what, name, obj, would_skip, options):
if name.startswith('_'):
return True
return would_skip
def setup(app):
app.connect("autodoc-skip-member", skip)
autosummary_generate = True
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = False
napoleon_use_rtype = False
def parse_attributes_section(self, section):
return self._format_fields("Attributes", self._consume_fields())
NumpyDocstring._parse_attributes_section = parse_attributes_section
def patched_parse(self):
self._sections["attributes"] = self._parse_attributes_section
self._unpatched_parse()
NumpyDocstring._unpatched_parse = NumpyDocstring._parse
NumpyDocstring._parse = patched_parse
intersphinx_mapping = {
"python": ("https://docs.python.org/", None),
"compas": ("https://compas.dev/compas/latest/", None),
}
def linkcode_resolve(domain, info):
if domain != 'py':
return None
if not info['module']:
return None
if not info['fullname']:
return None
package = info['module'].split('.')[0]
if not package.startswith('compas_occ'):
return None
module = importlib.import_module(info['module'])
parts = info['fullname'].split('.')
if len(parts) == 1:
obj = getattr(module, info['fullname'])
filename = inspect.getmodule(obj).__name__.replace('.', '/')
lineno = inspect.getsourcelines(obj)[1]
elif len(parts) == 2:
obj_name, attr_name = parts
obj = getattr(module, obj_name)
attr = getattr(obj, attr_name)
if inspect.isfunction(attr):
filename = inspect.getmodule(obj).__name__.replace('.', '/')
lineno = inspect.getsourcelines(attr)[1]
else:
return None
else:
return None
return f"https://github.com/compas-dev/compas_occ/blob/main/src/{filename}.py#L{lineno}"
extlinks = {}
html_theme = "compaspkg"
html_theme_path = sphinx_compas_theme.get_html_theme_path()
html_theme_options = {
"package_name" : "compas_occ",
"package_title" : project,
"package_version" : release,
"package_author" : "compas-dev",
"package_docs" : "https://compas.dev/compas_occ/",
"package_repo" : "https://github.com/compas-dev/compas_occ",
"package_old_versions_txt": "https://compas.dev/compas_occ/doc_versions.txt"
}
html_context = {}
html_static_path = []
html_extra_path = []
html_last_updated_fmt = ""
html_copy_source = False
html_show_sourcelink = False
html_permalinks = False
html_permalinks_icon = ""
html_compact_lists = True
| true | true |
f71f24ad28b326dc68ea84401a829a4afe299d99 | 8,582 | py | Python | applications/FluidDynamicsApplication/python_scripts/navier_stokes_compressible_explicit_solver.py | philbucher/Kratos | 1ceb900dbacfab344e27e32285250eafc52093ec | [
"BSD-4-Clause"
] | 1 | 2021-08-29T11:20:11.000Z | 2021-08-29T11:20:11.000Z | applications/FluidDynamicsApplication/python_scripts/navier_stokes_compressible_explicit_solver.py | philbucher/Kratos | 1ceb900dbacfab344e27e32285250eafc52093ec | [
"BSD-4-Clause"
] | 1 | 2021-11-19T12:14:50.000Z | 2021-11-19T12:14:50.000Z | applications/FluidDynamicsApplication/python_scripts/navier_stokes_compressible_explicit_solver.py | philbucher/Kratos | 1ceb900dbacfab344e27e32285250eafc52093ec | [
"BSD-4-Clause"
] | null | null | null | from __future__ import print_function, absolute_import, division # makes KratosMultiphysics backward compatible with python 2.6 and 2.7
# Importing the Kratos Library
import KratosMultiphysics
import KratosMultiphysics.FluidDynamicsApplication as KratosFluid
## Import base class file
from KratosMultiphysics.FluidDynamicsApplication.fluid_solver import FluidSolver
from KratosMultiphysics import python_linear_solver_factory as linear_solver_factory
from KratosMultiphysics.FluidDynamicsApplication import check_and_prepare_model_process_fluid
def CreateSolver(model, custom_settings):
return NavierStokesCompressibleExplicitSolver(model, custom_settings)
class NavierStokesCompressibleExplicitSolver(FluidSolver):
def __init__(self, model, custom_settings):
# Call base fluid solver constructor
self._validate_settings_in_baseclass=True # To be removed eventually
super(NavierStokesCompressibleExplicitSolver,self).__init__(model,custom_settings)
# Define the formulation settings
self.element_name = "CompressibleNavierStokesExplicit"
if custom_settings["domain_size"].GetInt() == 2:
self.condition_name = "LineCondition" # TODO: We need to create a Compressible NS condition (now using the base ones)
elif custom_settings["domain_size"].GetInt() == 3:
self.condition_name = "SurfaceCondition" # TODO: We need to create a Compressible NS condition (now using the base ones)
else:
err_msg = "Wrong domain size "
raise Exception(err_msg)
self.min_buffer_size = 2
self.element_has_nodal_properties = False # Note that DENSITY is nodally stored but considered as a DOF
KratosMultiphysics.Logger.PrintInfo("::[NavierStokesCompressibleExplicitSolver]:: ","Construction of NavierStokesCompressibleExplicitSolver finished.")
@classmethod
def GetDefaultParameters(cls):
##settings string in json format
default_settings = KratosMultiphysics.Parameters("""
{
"solver_type": "compressible_solver_from_defaults",
"model_part_name": "FluidModelPart",
"domain_size": -1,
"model_import_settings": {
"input_type": "mdpa",
"input_filename": "",
"reorder": false
},
"material_import_settings": {
"materials_filename": "FluidMaterials.json"
},
"echo_level": 1,
"time_order": 2,
"time_scheme" : "RK4",
"move_mesh_flag": false,
"shock_capturing": true,
"compute_reactions": false,
"reform_dofs_at_each_step" : false,
"assign_neighbour_elements_to_conditions": true,
"volume_model_part_name" : "volume_model_part",
"skin_parts": [""],
"no_skin_parts":[""],
"time_stepping" : {
"automatic_time_step" : true,
"CFL_number" : 1.0,
"minimum_delta_time" : 1.0e-8,
"maximum_delta_time" : 1.0e-2
},
"use_oss" : true
}""")
default_settings.AddMissingParameters(super().GetDefaultParameters())
return default_settings
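    # Illustrative usage (a sketch, not taken from this file): a user settings block is
    # passed to CreateSolver() defined above, and any keys it omits fall back to these
    # defaults, e.g.
    #   custom_settings = KratosMultiphysics.Parameters(
    #       '{"model_part_name": "FluidModelPart", "domain_size": 2, "time_scheme": "RK3-TVD"}')
    #   solver = CreateSolver(model, custom_settings)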
def AddVariables(self):
# Add DOF variables (formulation written in conservative form) and reactions
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.DENSITY) # Density DOF
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.MOMENTUM) # Momentum DOF
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.TOTAL_ENERGY) # Total energy DOF
self.main_model_part.AddNodalSolutionStepVariable(KratosFluid.REACTION_DENSITY) # Density DOF reaction
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.REACTION) # Momentum DOF reaction
self.main_model_part.AddNodalSolutionStepVariable(KratosFluid.REACTION_ENERGY) # Total energy DOF reaction
# Required variables
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.BODY_FORCE)
self.main_model_part.AddNodalSolutionStepVariable(KratosFluid.MASS_SOURCE)
self.main_model_part.AddNodalSolutionStepVariable(KratosFluid.HEAT_SOURCE)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.NORMAL)
# Post-process variables
self.main_model_part.AddNodalSolutionStepVariable(KratosFluid.MACH)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.VELOCITY)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.PRESSURE)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.TEMPERATURE)
KratosMultiphysics.Logger.PrintInfo("::[NavierStokesCompressibleExplicitSolver]:: ","Explicit compressible fluid solver variables added correctly")
def AddDofs(self):
domain_size = self.main_model_part.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE]
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.DENSITY, KratosFluid.REACTION_DENSITY, self.main_model_part)
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.MOMENTUM_X, KratosMultiphysics.REACTION_X, self.main_model_part)
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.MOMENTUM_Y, KratosMultiphysics.REACTION_Y, self.main_model_part)
if domain_size == 3:
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.MOMENTUM_Z, KratosMultiphysics.REACTION_Z, self.main_model_part)
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.TOTAL_ENERGY, KratosFluid.REACTION_ENERGY, self.main_model_part)
def Initialize(self):
self.GetComputingModelPart().ProcessInfo[KratosMultiphysics.OSS_SWITCH] = int(self.settings["use_oss"].GetBool())
self.GetComputingModelPart().ProcessInfo[KratosFluid.SHOCK_CAPTURING_SWITCH] = int(self.settings["shock_capturing"].GetBool())
self.solver = self._get_solution_strategy()
self.solver.SetEchoLevel(self.settings["echo_level"].GetInt())
self.solver.Initialize()
KratosMultiphysics.Logger.PrintInfo("::[NavierStokesCompressibleExplicitSolver]:: ","Explicit compressible fluid solver initialization finished.")
def _get_solution_strategy(self):
if not hasattr(self, '_solution_strategy'):
self._solution_strategy = self._create_solution_strategy()
return self._solution_strategy
def _create_solution_strategy(self):
self.computing_model_part = self.GetComputingModelPart()
strategy_settings = KratosMultiphysics.Parameters('''{}''')
strategy_settings.AddEmptyValue("rebuild_level").SetInt(0 if self.settings["reform_dofs_at_each_step"].GetBool() else 1)
strategy_settings.AddEmptyValue("move_mesh_flag").SetBool(self.settings["move_mesh_flag"].GetBool())
strategy_settings.AddEmptyValue("shock_capturing").SetBool(self.settings["shock_capturing"].GetBool())
rk_parameter = self.settings["time_scheme"].GetString()
rk_startegies = {
"RK3-TVD": KratosFluid.CompressibleNavierStokesExplicitSolvingStrategyRungeKutta3TVD,
"RK4" : KratosFluid.CompressibleNavierStokesExplicitSolvingStrategyRungeKutta4
}
if rk_parameter in rk_startegies:
return rk_startegies[rk_parameter](self.computing_model_part, strategy_settings)
err_msg = "Runge-Kutta method of type '{}' not available. Try any of\n".format(rk_parameter)
for key in rk_startegies:
err_msg = err_msg + " - {}\n".format(key)
raise RuntimeError(err_msg)
def _CreateEstimateDtUtility(self):
"""This method overloads FluidSolver in order to enforce:
```
self.settings["time_stepping"]["consider_compressibility_in_CFL"] == True
```
"""
if self.settings["time_stepping"].Has("consider_compressibility_in_CFL"):
            KratosMultiphysics.Logger.PrintWarning("", "User-specified consider_compressibility_in_CFL will be overridden with TRUE")
else:
self.settings["time_stepping"].AddEmptyValue("consider_compressibility_in_CFL")
self.settings["time_stepping"]["consider_compressibility_in_CFL"].SetBool(True)
estimate_dt_utility = KratosFluid.EstimateDtUtility(
self.GetComputingModelPart(),
self.settings["time_stepping"])
return estimate_dt_utility
| 53.304348 | 159 | 0.720228 | from __future__ import print_function, absolute_import, division
import KratosMultiphysics
import KratosMultiphysics.FluidDynamicsApplication as KratosFluid
from KratosMultiphysics.FluidDynamicsApplication.fluid_solver import FluidSolver
from KratosMultiphysics import python_linear_solver_factory as linear_solver_factory
from KratosMultiphysics.FluidDynamicsApplication import check_and_prepare_model_process_fluid
def CreateSolver(model, custom_settings):
return NavierStokesCompressibleExplicitSolver(model, custom_settings)
class NavierStokesCompressibleExplicitSolver(FluidSolver):
def __init__(self, model, custom_settings):
self._validate_settings_in_baseclass=True
super(NavierStokesCompressibleExplicitSolver,self).__init__(model,custom_settings)
self.element_name = "CompressibleNavierStokesExplicit"
if custom_settings["domain_size"].GetInt() == 2:
self.condition_name = "LineCondition"
elif custom_settings["domain_size"].GetInt() == 3:
self.condition_name = "SurfaceCondition"
else:
err_msg = "Wrong domain size "
raise Exception(err_msg)
self.min_buffer_size = 2
self.element_has_nodal_properties = False
KratosMultiphysics.Logger.PrintInfo("::[NavierStokesCompressibleExplicitSolver]:: ","Construction of NavierStokesCompressibleExplicitSolver finished.")
@classmethod
def GetDefaultParameters(cls):
        default_settings = KratosMultiphysics.Parameters("""
{
"solver_type": "compressible_solver_from_defaults",
"model_part_name": "FluidModelPart",
"domain_size": -1,
"model_import_settings": {
"input_type": "mdpa",
"input_filename": "",
"reorder": false
},
"material_import_settings": {
"materials_filename": "FluidMaterials.json"
},
"echo_level": 1,
"time_order": 2,
"time_scheme" : "RK4",
"move_mesh_flag": false,
"shock_capturing": true,
"compute_reactions": false,
"reform_dofs_at_each_step" : false,
"assign_neighbour_elements_to_conditions": true,
"volume_model_part_name" : "volume_model_part",
"skin_parts": [""],
"no_skin_parts":[""],
"time_stepping" : {
"automatic_time_step" : true,
"CFL_number" : 1.0,
"minimum_delta_time" : 1.0e-8,
"maximum_delta_time" : 1.0e-2
},
"use_oss" : true
}""")
default_settings.AddMissingParameters(super().GetDefaultParameters())
return default_settings
def AddVariables(self):
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.DENSITY)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.MOMENTUM)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.TOTAL_ENERGY)
self.main_model_part.AddNodalSolutionStepVariable(KratosFluid.REACTION_DENSITY)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.REACTION)
self.main_model_part.AddNodalSolutionStepVariable(KratosFluid.REACTION_ENERGY)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.BODY_FORCE)
self.main_model_part.AddNodalSolutionStepVariable(KratosFluid.MASS_SOURCE)
self.main_model_part.AddNodalSolutionStepVariable(KratosFluid.HEAT_SOURCE)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.NORMAL)
self.main_model_part.AddNodalSolutionStepVariable(KratosFluid.MACH)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.VELOCITY)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.PRESSURE)
self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.TEMPERATURE)
KratosMultiphysics.Logger.PrintInfo("::[NavierStokesCompressibleExplicitSolver]:: ","Explicit compressible fluid solver variables added correctly")
def AddDofs(self):
domain_size = self.main_model_part.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE]
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.DENSITY, KratosFluid.REACTION_DENSITY, self.main_model_part)
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.MOMENTUM_X, KratosMultiphysics.REACTION_X, self.main_model_part)
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.MOMENTUM_Y, KratosMultiphysics.REACTION_Y, self.main_model_part)
if domain_size == 3:
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.MOMENTUM_Z, KratosMultiphysics.REACTION_Z, self.main_model_part)
KratosMultiphysics.VariableUtils().AddDof(KratosMultiphysics.TOTAL_ENERGY, KratosFluid.REACTION_ENERGY, self.main_model_part)
def Initialize(self):
self.GetComputingModelPart().ProcessInfo[KratosMultiphysics.OSS_SWITCH] = int(self.settings["use_oss"].GetBool())
self.GetComputingModelPart().ProcessInfo[KratosFluid.SHOCK_CAPTURING_SWITCH] = int(self.settings["shock_capturing"].GetBool())
self.solver = self._get_solution_strategy()
self.solver.SetEchoLevel(self.settings["echo_level"].GetInt())
self.solver.Initialize()
KratosMultiphysics.Logger.PrintInfo("::[NavierStokesCompressibleExplicitSolver]:: ","Explicit compressible fluid solver initialization finished.")
def _get_solution_strategy(self):
if not hasattr(self, '_solution_strategy'):
self._solution_strategy = self._create_solution_strategy()
return self._solution_strategy
def _create_solution_strategy(self):
self.computing_model_part = self.GetComputingModelPart()
strategy_settings = KratosMultiphysics.Parameters('''{}''')
strategy_settings.AddEmptyValue("rebuild_level").SetInt(0 if self.settings["reform_dofs_at_each_step"].GetBool() else 1)
strategy_settings.AddEmptyValue("move_mesh_flag").SetBool(self.settings["move_mesh_flag"].GetBool())
strategy_settings.AddEmptyValue("shock_capturing").SetBool(self.settings["shock_capturing"].GetBool())
rk_parameter = self.settings["time_scheme"].GetString()
rk_startegies = {
"RK3-TVD": KratosFluid.CompressibleNavierStokesExplicitSolvingStrategyRungeKutta3TVD,
"RK4" : KratosFluid.CompressibleNavierStokesExplicitSolvingStrategyRungeKutta4
}
if rk_parameter in rk_startegies:
return rk_startegies[rk_parameter](self.computing_model_part, strategy_settings)
err_msg = "Runge-Kutta method of type '{}' not available. Try any of\n".format(rk_parameter)
for key in rk_startegies:
err_msg = err_msg + " - {}\n".format(key)
raise RuntimeError(err_msg)
def _CreateEstimateDtUtility(self):
if self.settings["time_stepping"].Has("consider_compressibility_in_CFL"):
            KratosMultiphysics.Logger.PrintWarning("", "User-specified consider_compressibility_in_CFL will be overridden with TRUE")
else:
self.settings["time_stepping"].AddEmptyValue("consider_compressibility_in_CFL")
self.settings["time_stepping"]["consider_compressibility_in_CFL"].SetBool(True)
estimate_dt_utility = KratosFluid.EstimateDtUtility(
self.GetComputingModelPart(),
self.settings["time_stepping"])
return estimate_dt_utility
| true | true |
f71f25cea0ab34d883dca3594f5c11497fd8475d | 435 | py | Python | accounts/migrations/0025_auto_20170817_1935.py | enrobyn/lookit-api | 621fbb8b25100a21fd94721d39003b5d4f651dc5 | [
"MIT"
] | null | null | null | accounts/migrations/0025_auto_20170817_1935.py | enrobyn/lookit-api | 621fbb8b25100a21fd94721d39003b5d4f651dc5 | [
"MIT"
] | null | null | null | accounts/migrations/0025_auto_20170817_1935.py | enrobyn/lookit-api | 621fbb8b25100a21fd94721d39003b5d4f651dc5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-17 19:35
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0024_auto_20170814_1542'),
]
operations = [
migrations.RenameField(
model_name='user',
old_name='contact_name',
new_name='nickname',
),
]
| 20.714286 | 48 | 0.611494 |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0024_auto_20170814_1542'),
]
operations = [
migrations.RenameField(
model_name='user',
old_name='contact_name',
new_name='nickname',
),
]
| true | true |
f71f26b1e606b6bc72ed061535852408aecdbd21 | 5,655 | py | Python | oauth2_backend/admin.py | upeu-001-pro/upeuauth-serve | 17b204b6df4c0f09340befd471de56369b4b90c7 | [
"MIT"
] | null | null | null | oauth2_backend/admin.py | upeu-001-pro/upeuauth-serve | 17b204b6df4c0f09340befd471de56369b4b90c7 | [
"MIT"
] | null | null | null | oauth2_backend/admin.py | upeu-001-pro/upeuauth-serve | 17b204b6df4c0f09340befd471de56369b4b90c7 | [
"MIT"
] | 1 | 2021-01-03T22:25:03.000Z | 2021-01-03T22:25:03.000Z |
from django.contrib import admin
from django.contrib.auth.models import Group
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
# Register your models here.
from .models.person import Person
from .models.user import User
from .models.hierarchy_type import HierarchyType
from .models.hierarchy import Hierarchy
from .models.menu import Menu
from .models.user_hierarchy_group import UserHierarchyGroup
from .models.user_hierarchy_permission import UserHierarchyPermission
from .models.person import Religion, Ethnicity, Occupation, EducationLevel
from .models.person import PensionScheme
from .models.person_address import PersonAddressType, PersonAddress
from .models.person_document import DocumentType, PersonDocument
from .models.person_phone import PersonPhoneType, PersonPhone
admin.site.register(ContentType)
class PermissionAdmin(admin.ModelAdmin):
list_display = ("codename", "name", "content_type")
search_fields = ("codename", "name", "content_type__app_label")
admin.site.register(Permission, PermissionAdmin)
'''
admin.site.register(Hierarchy)
admin.site.register(HierarchyType)
admin.site.register(UserHierarchyGroup)
admin.site.register(UserHierarchyPermission)
admin.site.register(Menu)
'''
admin.site.register(Person)
admin.site.register(Religion)
admin.site.register(Ethnicity)
admin.site.register(Occupation)
admin.site.register(EducationLevel)
admin.site.register(PensionScheme)
admin.site.register(PersonAddressType)
admin.site.register(PersonAddress)
admin.site.register(DocumentType)
admin.site.register(PersonDocument)
admin.site.register(PersonPhoneType)
admin.site.register(PersonPhone)
# forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth import get_user_model
CHOICES = (('ON', 'ON'),
('OFF', 'OFF'),
)
class MyUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = User # get_user_model()
class MyUserChangeForm(UserChangeForm):
description = forms.CharField(
label=_('Description'), required=False, initial='edit',
widget=forms.Textarea)
# is_staff = forms.ChoiceField(widget=forms.RadioSelect, choices=CHOICES)
class Meta(UserChangeForm.Meta):
model = User # get_user_model()
class MyUserAdmin(UserAdmin):
""" """
fieldsets = (
(None, {'fields': ('username', 'password')}),
(_('Personal info'),
{'fields': ('email',)}),
(_('Permissions'), {'fields': ('is_active', 'description', 'is_staff',
'is_superuser', 'groups',
'user_permissions')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
form = MyUserChangeForm
add_form = MyUserCreationForm
list_display = ('username', 'email',
'first_name', 'last_name', 'is_staff', ) # 'status'
list_filter = ('is_staff', 'is_superuser',
'is_active', 'groups', 'date_joined')
#date_hierarchy = 'date_joined'
def status(self, obj):
return obj.status
status.admin_order_field = 'status'
status.short_description = 'status'
# raw_id_fields = ('person',)
'''
def save_model(self, request, obj, form, change):
if obj.pk:
if obj.is_active:
if UserStatus.objects.filter(user=obj.pk).count() > 0:
if UserStatus.objects.filter(user=obj.pk).latest('id').status != ON:
UserStatus.objects.create(
status=ON,
description=form.cleaned_data['description'], user=obj)
                else: # no records in UserStatus yet
UserStatus.objects.create(
status=ON,
description=form.cleaned_data['description'], user=obj)
else:
if UserStatus.objects.filter(user=obj.pk).count() > 0:
if UserStatus.objects.filter(user=obj.pk).latest('id').status != OFF:
UserStatus.objects.create(
status=OFF,
description=form.cleaned_data['description'], user=obj)
else:
UserStatus.objects.create(
status=OFF,
description=form.cleaned_data['description'], user=obj)
obj.save()
'''
def get_queryset(self, request):
qs = super(MyUserAdmin, self).get_queryset(request)
        # qr = qs.with_status() # add 'status' column
# print qr
return qs
'''
def formfield_for_choice_field(self, db_field, request, **kwargs):
if db_field.name == 'status':
kwargs['choices'] = (
(ON, 'Accepted'),
(OFF, 'Denied'),
(True, 'Denied'),
(False, 'Denied'),
(None, 'Denied'),
(0, 'Denied'),
(1, 'Denied'),
('0', 'Denied'),
('1', 'Denied'),
('True', 'Denied'),
('False', 'Denied'),
)
# db_field['status'].choices = (
# (ON, 'Accepted'),
# (OFF, 'Denied'),
# )
return super(MyUserAdmin, self).formfield_for_choice_field(db_field,
request, **kwargs)
'''
admin.site.register(User, MyUserAdmin)
| 32.877907 | 89 | 0.618921 |
from django.contrib import admin
from django.contrib.auth.models import Group
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from .models.person import Person
from .models.user import User
from .models.hierarchy_type import HierarchyType
from .models.hierarchy import Hierarchy
from .models.menu import Menu
from .models.user_hierarchy_group import UserHierarchyGroup
from .models.user_hierarchy_permission import UserHierarchyPermission
from .models.person import Religion, Ethnicity, Occupation, EducationLevel
from .models.person import PensionScheme
from .models.person_address import PersonAddressType, PersonAddress
from .models.person_document import DocumentType, PersonDocument
from .models.person_phone import PersonPhoneType, PersonPhone
admin.site.register(ContentType)
class PermissionAdmin(admin.ModelAdmin):
list_display = ("codename", "name", "content_type")
search_fields = ("codename", "name", "content_type__app_label")
admin.site.register(Permission, PermissionAdmin)
admin.site.register(Person)
admin.site.register(Religion)
admin.site.register(Ethnicity)
admin.site.register(Occupation)
admin.site.register(EducationLevel)
admin.site.register(PensionScheme)
admin.site.register(PersonAddressType)
admin.site.register(PersonAddress)
admin.site.register(DocumentType)
admin.site.register(PersonDocument)
admin.site.register(PersonPhoneType)
admin.site.register(PersonPhone)
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth import get_user_model
CHOICES = (('ON', 'ON'),
('OFF', 'OFF'),
)
class MyUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = User
class MyUserChangeForm(UserChangeForm):
description = forms.CharField(
label=_('Description'), required=False, initial='edit',
widget=forms.Textarea)
class Meta(UserChangeForm.Meta):
model = User
class MyUserAdmin(UserAdmin):
fieldsets = (
(None, {'fields': ('username', 'password')}),
(_('Personal info'),
{'fields': ('email',)}),
(_('Permissions'), {'fields': ('is_active', 'description', 'is_staff',
'is_superuser', 'groups',
'user_permissions')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
form = MyUserChangeForm
add_form = MyUserCreationForm
list_display = ('username', 'email',
'first_name', 'last_name', 'is_staff', )
list_filter = ('is_staff', 'is_superuser',
'is_active', 'groups', 'date_joined')
def status(self, obj):
return obj.status
status.admin_order_field = 'status'
status.short_description = 'status'
def get_queryset(self, request):
qs = super(MyUserAdmin, self).get_queryset(request)
        return qs
admin.site.register(User, MyUserAdmin)
| true | true |
f71f26b91d28f2fac45042a51478207f81d2160f | 2,660 | py | Python | python/day14.py | karlwnw/adventofcode2019 | 7a01a0dd9c3f93ae3f9aa123a91641a37289eb7a | [
"MIT"
] | 2 | 2020-01-02T12:59:44.000Z | 2020-01-04T19:21:31.000Z | python/day14.py | karlwnw/adventofcode2019 | 7a01a0dd9c3f93ae3f9aa123a91641a37289eb7a | [
"MIT"
] | null | null | null | python/day14.py | karlwnw/adventofcode2019 | 7a01a0dd9c3f93ae3f9aa123a91641a37289eb7a | [
"MIT"
] | null | null | null | import re
import math
from collections import defaultdict
def parse(content):
return list(map(parse_line, content.strip().split("\n")))
def parse_line(row):
matches = re.findall(r"\s?(\d+) ([A-Z]+),? ", row.strip())
inputs = [(int(item[0]), item[1]) for item in matches]
output = re.match(r".+ => (\d+) ([A-Z]+)$", row.strip()).groups()
return inputs, (int(output[0]), output[1])
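# For illustration (assuming the usual puzzle input format), a reaction line such as
#   "7 A, 1 E => 1 FUEL"
# parses to ([(7, 'A'), (1, 'E')], (1, 'FUEL')).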
def requirements_mapping(reactions):
# Verify that there is only one rule per Chemical
assert len(reactions) == len(set(row[-1][1] for row in reactions))
return {row[-1][1]: (row[-1][0], row[0]) for row in reactions}
def min_usage(reactions, C="FUEL", I="ORE", how_many=1, usage=None, leftovers=None):
if usage is None:
usage = defaultdict(int)
if leftovers is None:
leftovers = defaultdict(int)
usage[C] += how_many
# if C == I:
if C not in reactions: # Generalize for any (C, I) pair
return usage, leftovers
extra = min(how_many, leftovers[C])
how_many -= extra
leftovers[C] -= extra
quantity, inputs = reactions[C]
coef = math.ceil(how_many / quantity)
for qty, name in inputs:
usage, leftovers = min_usage(reactions, name, I, coef * qty, usage, leftovers)
leftovers[C] += coef * quantity - how_many
return usage, defaultdict(int, {k: v for k, v in leftovers.items() if v})
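# A minimal sketch of how min_usage is meant to be called (see __main__ below):
#   usage, leftovers = min_usage(requirements_mapping(reactions))
#   usage["ORE"]  # total ORE required for one unit of FUEL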
def binary_search(func, low, high, expected):
while low < high:
        # Bias mid upward so that "low = mid" always makes progress; with a plain
        # floor midpoint the loop can stall once low and high become adjacent.
        mid = (low + high + 1) // 2
result = func(mid)
if result < expected:
low = mid
else:
high = mid - 1
return low
def get_max_fuel(reactions, max_ore=1e12):
f = lambda x: min_usage(reactions, how_many=x)[0]["ORE"]
return binary_search(f, 0, 1000000, max_ore)
if __name__ == "__main__":
with open("../inputs/day14.input") as f:
reactions = parse(f.read())
mapping = requirements_mapping(reactions)
# Part I
necessary, waste = min_usage(mapping)
print(necessary["ORE"]) # 2486514
# Part II
value = get_max_fuel(mapping, 1e12)
print(value) # 998536
# Verify that we got the correct value
necessary, _ = min_usage(mapping, how_many=value)
assert necessary["ORE"] < 1e12
necessary, _ = min_usage(mapping, how_many=value + 1)
assert necessary["ORE"] > 1e12
# Actually, this could be solved linearly in constant time with 2 data points
x1, y1 = 1, 2486514
x2, y2 = 10000000, min_usage(mapping, how_many=10000000)[0]["ORE"]
# y = ax + b
slope = (y2 - y1) / (x2 - x1)
b = y1 - slope * x1
fuel = round((1e12 - b) / slope)
assert fuel == value
| 27.142857 | 86 | 0.616917 | import re
import math
from collections import defaultdict
def parse(content):
return list(map(parse_line, content.strip().split("\n")))
def parse_line(row):
matches = re.findall(r"\s?(\d+) ([A-Z]+),? ", row.strip())
inputs = [(int(item[0]), item[1]) for item in matches]
output = re.match(r".+ => (\d+) ([A-Z]+)$", row.strip()).groups()
return inputs, (int(output[0]), output[1])
def requirements_mapping(reactions):
assert len(reactions) == len(set(row[-1][1] for row in reactions))
return {row[-1][1]: (row[-1][0], row[0]) for row in reactions}
def min_usage(reactions, C="FUEL", I="ORE", how_many=1, usage=None, leftovers=None):
if usage is None:
usage = defaultdict(int)
if leftovers is None:
leftovers = defaultdict(int)
usage[C] += how_many
if C not in reactions:
return usage, leftovers
extra = min(how_many, leftovers[C])
how_many -= extra
leftovers[C] -= extra
quantity, inputs = reactions[C]
coef = math.ceil(how_many / quantity)
for qty, name in inputs:
usage, leftovers = min_usage(reactions, name, I, coef * qty, usage, leftovers)
leftovers[C] += coef * quantity - how_many
return usage, defaultdict(int, {k: v for k, v in leftovers.items() if v})
def binary_search(func, low, high, expected):
while low < high:
        mid = (low + high + 1) // 2
result = func(mid)
if result < expected:
low = mid
else:
high = mid - 1
return low
def get_max_fuel(reactions, max_ore=1e12):
f = lambda x: min_usage(reactions, how_many=x)[0]["ORE"]
return binary_search(f, 0, 1000000, max_ore)
if __name__ == "__main__":
with open("../inputs/day14.input") as f:
reactions = parse(f.read())
mapping = requirements_mapping(reactions)
necessary, waste = min_usage(mapping)
print(necessary["ORE"])
value = get_max_fuel(mapping, 1e12)
print(value)
necessary, _ = min_usage(mapping, how_many=value)
assert necessary["ORE"] < 1e12
necessary, _ = min_usage(mapping, how_many=value + 1)
assert necessary["ORE"] > 1e12
x1, y1 = 1, 2486514
x2, y2 = 10000000, min_usage(mapping, how_many=10000000)[0]["ORE"]
slope = (y2 - y1) / (x2 - x1)
b = y1 - slope * x1
fuel = round((1e12 - b) / slope)
assert fuel == value
| true | true |
f71f2764a5a988302536ffa630ea868e4660c75c | 3,958 | py | Python | ramile/project.py | Jeff-Tian/ramile | 367beefea0b764527026dfdeb6ca951c41d89a7b | [
"MIT"
] | 1 | 2019-05-17T08:56:15.000Z | 2019-05-17T08:56:15.000Z | ramile/project.py | Jeff-Tian/ramile | 367beefea0b764527026dfdeb6ca951c41d89a7b | [
"MIT"
] | null | null | null | ramile/project.py | Jeff-Tian/ramile | 367beefea0b764527026dfdeb6ca951c41d89a7b | [
"MIT"
] | 1 | 2020-11-16T03:18:52.000Z | 2020-11-16T03:18:52.000Z | from docx import Document
from ramile.project_info import ProjectInfo
from ramile.project_processor import ProjectProcessor
from ramile.processors import FileProcessor
import os
class Project(object):
info = None
output = True
files = []
lines = []
def __init__(self, project_root, lines_to_extract=3000, output_file='extracted_code.docx', output=True):
self.info = ProjectInfo(project_root, lines_to_extract)
self.output = output
if output:
self.output_path = self.info.get_output_file_path(output_file)
# self.output_file = open(
# self.info.get_output_file_path(output_file), 'w+')
self.output_file = Document(os.path.join(
os.path.dirname(__file__), 'data/template.docx'))
self.paragraph = None
return
def run(self, output=True, echo=True):
if echo:
print("I'm going to extract %s lines from %s." %
(self.info.lines_to_extract, self.info.project_root))
self.info.lines_extracted = 0
project_processor = ProjectProcessor(self.info)
file_processor = FileProcessor()
# 1. Process and collect the files
self.files = project_processor.process()
# 2. Process each file
for file in self.files:
for output in file_processor.process(file):
self.export(output)
file.extracted_line()
if self.info.has_extracted_enough_lines():
break
# collect file summary
self.info.lines_skipped_blank += file.blank_lines
self.info.lines_skipped_comments += file.comment_lines
if self.info.has_extracted_enough_lines():
break
# self.output_file.close()
self.write_to_file()
if echo:
self.print_summary()
if not self.info.has_extracted_enough_lines():
print("Warning!! Not enough source code to extract %s lines!" %
self.info.lines_to_extract)
return
def print_summary(self):
print("The extraction is done. Here's the summary:")
print("Files that contributed to the output:")
for file in self.files:
if file.has_extracted_lines():
print("%s : %s lines" % (file.file_path, file.extracted_lines))
print("Code was extracted in: %s" % self.output_path)
print("Total extracted: %s lines" % self.info.lines_extracted)
print("Wrote to file: %s lines" % len(self.lines))
print("Total skipped comments: %s lines" %
self.info.lines_skipped_comments)
print("Total skipped blank lines: %s lines" %
self.info.lines_skipped_blank)
if self.info.lines_extracted > 3000:
print("Total skipped overflow lines: %s lines" %
(self.info.lines_extracted - len(self.lines)))
def export(self, line):
max_length_of_line = 60
appended = 0
while appended < len(line):
l = line[appended:appended+max_length_of_line]
self.lines.append(l)
self.info.lines_extracted += 1
appended += len(l)
return
def write_to_file(self):
if self.output:
if self.paragraph is None:
self.paragraph = self.output_file.paragraphs[0]
if self.info.lines_extracted > 3000:
lines_to_cut = self.info.lines_extracted - 3000
del self.lines[1501:1501+lines_to_cut]
i = 0
for line in self.lines:
i += 1
if i < 3000 and not line.endswith('\n'):
line += '\n'
if i == 3000 and line.endswith('\n'):
line = line[0:len(line)-1]
self.paragraph.add_run(line)
self.output_file.save(self.output_path)
return
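# Illustrative usage (a sketch; the source path and keyword values are assumptions):
#   project = Project('/path/to/source/tree', lines_to_extract=3000,
#                     output_file='extracted_code.docx')
#   project.run()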
| 36.311927 | 108 | 0.588934 | from docx import Document
from ramile.project_info import ProjectInfo
from ramile.project_processor import ProjectProcessor
from ramile.processors import FileProcessor
import os
class Project(object):
info = None
output = True
files = []
lines = []
def __init__(self, project_root, lines_to_extract=3000, output_file='extracted_code.docx', output=True):
self.info = ProjectInfo(project_root, lines_to_extract)
self.output = output
if output:
self.output_path = self.info.get_output_file_path(output_file)
self.output_file = Document(os.path.join(
os.path.dirname(__file__), 'data/template.docx'))
self.paragraph = None
return
def run(self, output=True, echo=True):
if echo:
print("I'm going to extract %s lines from %s." %
(self.info.lines_to_extract, self.info.project_root))
self.info.lines_extracted = 0
project_processor = ProjectProcessor(self.info)
file_processor = FileProcessor()
# 1. Process and collect the files
self.files = project_processor.process()
# 2. Process each file
for file in self.files:
for output in file_processor.process(file):
self.export(output)
file.extracted_line()
if self.info.has_extracted_enough_lines():
break
# collect file summary
self.info.lines_skipped_blank += file.blank_lines
self.info.lines_skipped_comments += file.comment_lines
if self.info.has_extracted_enough_lines():
break
# self.output_file.close()
self.write_to_file()
if echo:
self.print_summary()
if not self.info.has_extracted_enough_lines():
print("Warning!! Not enough source code to extract %s lines!" %
self.info.lines_to_extract)
return
def print_summary(self):
print("The extraction is done. Here's the summary:")
print("Files that contributed to the output:")
for file in self.files:
if file.has_extracted_lines():
print("%s : %s lines" % (file.file_path, file.extracted_lines))
print("Code was extracted in: %s" % self.output_path)
print("Total extracted: %s lines" % self.info.lines_extracted)
print("Wrote to file: %s lines" % len(self.lines))
print("Total skipped comments: %s lines" %
self.info.lines_skipped_comments)
print("Total skipped blank lines: %s lines" %
self.info.lines_skipped_blank)
if self.info.lines_extracted > 3000:
print("Total skipped overflow lines: %s lines" %
(self.info.lines_extracted - len(self.lines)))
def export(self, line):
max_length_of_line = 60
appended = 0
while appended < len(line):
l = line[appended:appended+max_length_of_line]
self.lines.append(l)
self.info.lines_extracted += 1
appended += len(l)
return
def write_to_file(self):
if self.output:
if self.paragraph is None:
self.paragraph = self.output_file.paragraphs[0]
if self.info.lines_extracted > 3000:
lines_to_cut = self.info.lines_extracted - 3000
del self.lines[1501:1501+lines_to_cut]
i = 0
for line in self.lines:
i += 1
if i < 3000 and not line.endswith('\n'):
line += '\n'
if i == 3000 and line.endswith('\n'):
line = line[0:len(line)-1]
self.paragraph.add_run(line)
self.output_file.save(self.output_path)
return
| true | true |
f71f27beb598ddbd1a04990c4e08324eafb725e5 | 3,297 | py | Python | Python/filter_dvl.py | markvilar/Cardinal | a3d87d34ed253a7a4400ed056c5d59c20f15973b | [
"Apache-2.0"
] | null | null | null | Python/filter_dvl.py | markvilar/Cardinal | a3d87d34ed253a7a4400ed056c5d59c20f15973b | [
"Apache-2.0"
] | null | null | null | Python/filter_dvl.py | markvilar/Cardinal | a3d87d34ed253a7a4400ed056c5d59c20f15973b | [
"Apache-2.0"
] | null | null | null | import argparse
import datetime
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
plt.style.use("./Styles/Scientific.mplstyle")
from typing import Dict, List
import data
import filters
import utilities
import utm
def filter_dvl(data_config: data.DataConfiguration, \
filter_config: filters.FilterConfiguration):
"""
"""
# Read data.
data = pd.read_csv(data_config.input)
# Extract relevant data for filtering.
time = data["Epoch"].to_numpy()
altitude = data["Altitude"].to_numpy()
# Calculate sampling frequency.
filter_config.sample_frequency = 1 / np.mean(time[1:] - time[0:-1])
# Add end values.
filtered_altitude = filters.add_appendage(altitude, filter_config)
# Filter data and account for time delay.
filtered_altitude, filter_delay = filters.FIR_filter(filtered_altitude, \
filter_config, axis=1)
filtered_time = time - filter_delay
print("\nDVL:")
print(" - Sampling time: {0:.4f}".format( \
1 / filter_config.sample_frequency))
print(" - Sampling frequency: {0:.4f}".format( \
filter_config.sample_frequency))
print(" - Filter time delay: {0:.4f}".format(filter_delay))
# Remove end values.
filtered_altitude = filters.remove_appendage(filtered_altitude, \
filter_config)
filtered_data = pd.DataFrame()
filtered_data["Epoch"] = filtered_time
filtered_data["Altitude"] = filtered_altitude
# Datetime calculations.
times = []
for epoch in filtered_data["Epoch"]:
time = datetime.datetime.fromtimestamp(epoch).strftime( \
data_config.datetime_format)
times.append(time)
filtered_data["Datetime"] = np.array(times, dtype=str)
# Save data.
if data_config.save_output:
filtered_data = pd.DataFrame(filtered_data)
filtered_data.to_csv(data_config.output + "ROV-DVL.csv", sep=',')
def main():
# Parse arguments.
parser = argparse.ArgumentParser( \
description="Filter DVL data with a FIR lowpass filter.")
parser.add_argument("input", type=str, help="Input file path.")
parser.add_argument("output", type=str, help="Output directory path.")
parser.add_argument("order", type=int, help="Filter order.")
parser.add_argument("cutoff", type=float, help="Filter cutoff.")
parser.add_argument("appendage", type=int, help="Filter appendage.")
parser.add_argument('--show_figures', type=bool, default=False, \
help= "Show figures.", action=argparse.BooleanOptionalAction)
parser.add_argument('--save_figures', type=bool, default=False, \
help= "Save figures.", action=argparse.BooleanOptionalAction)
parser.add_argument('--save_output', type=bool, default=False, \
help= "Save output.", action=argparse.BooleanOptionalAction)
args = parser.parse_args()
# Data configuration.
data_config = data.DataConfiguration(args.input, args.output, \
args.show_figures, args.save_figures, args.save_output)
# Filter configuration.
filter_config = filters.FilterConfiguration(args.order, args.cutoff, \
args.appendage)
# Filter data.
filter_dvl(data_config, filter_config)
if __name__ == '__main__':
main()
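# Example invocation (illustrative only; the paths and filter values are assumptions):
#   python filter_dvl.py ./Data/dvl_raw.csv ./Output/ 8 0.5 100 --save_output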
| 33.30303 | 77 | 0.693358 | import argparse
import datetime
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
plt.style.use("./Styles/Scientific.mplstyle")
from typing import Dict, List
import data
import filters
import utilities
import utm
def filter_dvl(data_config: data.DataConfiguration, \
filter_config: filters.FilterConfiguration):
data = pd.read_csv(data_config.input)
time = data["Epoch"].to_numpy()
altitude = data["Altitude"].to_numpy()
filter_config.sample_frequency = 1 / np.mean(time[1:] - time[0:-1])
filtered_altitude = filters.add_appendage(altitude, filter_config)
filtered_altitude, filter_delay = filters.FIR_filter(filtered_altitude, \
filter_config, axis=1)
filtered_time = time - filter_delay
print("\nDVL:")
print(" - Sampling time: {0:.4f}".format( \
1 / filter_config.sample_frequency))
print(" - Sampling frequency: {0:.4f}".format( \
filter_config.sample_frequency))
print(" - Filter time delay: {0:.4f}".format(filter_delay))
filtered_altitude = filters.remove_appendage(filtered_altitude, \
filter_config)
filtered_data = pd.DataFrame()
filtered_data["Epoch"] = filtered_time
filtered_data["Altitude"] = filtered_altitude
times = []
for epoch in filtered_data["Epoch"]:
time = datetime.datetime.fromtimestamp(epoch).strftime( \
data_config.datetime_format)
times.append(time)
filtered_data["Datetime"] = np.array(times, dtype=str)
if data_config.save_output:
filtered_data = pd.DataFrame(filtered_data)
filtered_data.to_csv(data_config.output + "ROV-DVL.csv", sep=',')
def main():
parser = argparse.ArgumentParser( \
description="Filter DVL data with a FIR lowpass filter.")
parser.add_argument("input", type=str, help="Input file path.")
parser.add_argument("output", type=str, help="Output directory path.")
parser.add_argument("order", type=int, help="Filter order.")
parser.add_argument("cutoff", type=float, help="Filter cutoff.")
parser.add_argument("appendage", type=int, help="Filter appendage.")
parser.add_argument('--show_figures', type=bool, default=False, \
help= "Show figures.", action=argparse.BooleanOptionalAction)
parser.add_argument('--save_figures', type=bool, default=False, \
help= "Save figures.", action=argparse.BooleanOptionalAction)
parser.add_argument('--save_output', type=bool, default=False, \
help= "Save output.", action=argparse.BooleanOptionalAction)
args = parser.parse_args()
data_config = data.DataConfiguration(args.input, args.output, \
args.show_figures, args.save_figures, args.save_output)
filter_config = filters.FilterConfiguration(args.order, args.cutoff, \
args.appendage)
filter_dvl(data_config, filter_config)
if __name__ == '__main__':
main()
| true | true |
f71f280244849be69121280ca690b37322b4626d | 3,220 | py | Python | setup.py | philastrophist/replicable | 1a5eead7e5a1e149d7818bedf4a985d837436157 | [
"MIT"
] | null | null | null | setup.py | philastrophist/replicable | 1a5eead7e5a1e149d7818bedf4a985d837436157 | [
"MIT"
] | null | null | null | setup.py | philastrophist/replicable | 1a5eead7e5a1e149d7818bedf4a985d837436157 | [
"MIT"
] | null | null | null | import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data.
NAME = 'replicable'
DESCRIPTION = 'Reproducible storage of gridded and stochastically generated simulated datasets'
URL = 'https://github.com/philastrophist/replicable'
EMAIL = 'shaun.c.read@gmail.com'
AUTHOR = 'philastrophist'
# What packages are required for this module to be executed?
with open('requirements.txt', 'r') as f:
REQUIRED = f.readlines()
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.rst' is present in your MANIFEST.in file!
with io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = '\n' + f.read()
# Load the package's __version__.py module as a dictionary.
about = {}
with open(os.path.join(here, NAME, '__version__.py')) as f:
exec(f.read(), about)
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds...')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution...')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPi via Twine...')
os.system('twine upload dist/*')
sys.exit()
# Where the magic happens:
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
author=AUTHOR,
author_email=EMAIL,
url=URL,
packages=find_packages(exclude=('tests',)),
#If your package is a single module, use this instead of 'packages':
# py_modules=['mypackage'],
entry_points={},
install_requires=REQUIRED,
include_package_data=True,
license='MIT',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
) | 30.377358 | 95 | 0.64472 | import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
NAME = 'replicable'
DESCRIPTION = 'Reproducible storage of gridded and stochastically generated simulated datasets'
URL = 'https://github.com/philastrophist/replicable'
EMAIL = 'shaun.c.read@gmail.com'
AUTHOR = 'philastrophist'
with open('requirements.txt', 'r') as f:
REQUIRED = f.readlines()
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.rst' is present in your MANIFEST.in file!
with io.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = '\n' + f.read()
# Load the package's __version__.py module as a dictionary.
about = {}
with open(os.path.join(here, NAME, '__version__.py')) as f:
exec(f.read(), about)
class UploadCommand(Command):
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds...')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution...')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPi via Twine...')
os.system('twine upload dist/*')
sys.exit()
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
author=AUTHOR,
author_email=EMAIL,
url=URL,
packages=find_packages(exclude=('tests',)),
entry_points={},
install_requires=REQUIRED,
include_package_data=True,
license='MIT',
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
cmdclass={
'upload': UploadCommand,
},
) | true | true |
f71f2c2568110ede12234a40ddb12f9d98cafa22 | 4,441 | py | Python | test/test_rfc1155.py | jpwarren/libsnmp | c676ce243bcf6ede2bb2534dceb9486971b6ff69 | [
"MIT"
] | 1 | 2019-12-02T04:07:23.000Z | 2019-12-02T04:07:23.000Z | test/test_rfc1155.py | jpwarren/libsnmp | c676ce243bcf6ede2bb2534dceb9486971b6ff69 | [
"MIT"
] | null | null | null | test/test_rfc1155.py | jpwarren/libsnmp | c676ce243bcf6ede2bb2534dceb9486971b6ff69 | [
"MIT"
] | 2 | 2019-12-02T04:07:30.000Z | 2019-12-02T04:18:57.000Z | #!/usr/bin/env python
# $Id$
# $Revision$
#
# libsnmp - a Python SNMP library
# Copyright (C) 2003 Unicity Pty Ltd <libsnmp@unicity.com.au>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Unit tests for the encoder/decoder
import unittest
import logging
import string
import sys
sys.path.append('../lib')
from libsnmp import util
from libsnmp import debug
from libsnmp import rfc1155
class EncoderTest(unittest.TestCase):
def setUp(self):
self.log = logging.getLogger('EncoderTest')
self.log.setLevel(logging.DEBUG)
return
def tearDown(self):
logging.shutdown()
return
def test_objectid_equality(self):
"""test equality of objects sourced from different initialisation values"""
input_a = [1,3,6,1,2,1,2,3,234,23,4,23,423,234,23423423,4234] # list
input_b = tuple(input_a) # tuple
input_c = '.'.join( [ str(x) for x in input_a] ) # string no leading dot
input_d = '.' + input_c # string leading dot
a = rfc1155.ObjectID(input_a)
b = rfc1155.ObjectID(input_b)
c = rfc1155.ObjectID(input_c)
d = rfc1155.ObjectID(input_d)
e = rfc1155.ObjectID('.1.3')
f = rfc1155.ObjectID().decode(a.encode())[0]
g = rfc1155.Asn1Object().decode(a.encode())[0]
self.assertEquals(a, a)
self.assertEquals(a, b)
self.assertEquals(a, c)
self.assertEquals(a, d)
self.assertNotEquals(a, e)
self.assertEquals(a, f)
self.assertEquals(a, g)
self.assertEquals(b, a)
self.assertEquals(b, b)
self.assertEquals(b, c)
self.assertEquals(b, d)
self.assertNotEquals(b, e)
self.assertEquals(b, f)
self.assertEquals(b, g)
pass
def test_integer(self):
a = rfc1155.Integer(0)
b = rfc1155.Integer(0x7FFFFFFF)
c = rfc1155.Integer(-1)
d = rfc1155.Integer(-0x7FFFFFF)
return
def test_ip_address(self):
addresses = (('0.0.0.0', '@\x04\x00\x00\x00\x00'),
('255.255.255.255', '@\x04\xff\xff\xff\xff'),
('1.2.3.4', '@\x04\x01\x02\x03\x04'),
('10.0.0.1', '@\x04\n\x00\x00\x01'),
('254.154.1.0', '@\x04\xfe\x9a\x01\x00'),
('0.0.0.1', '@\x04\x00\x00\x00\x01'),
('255.0.0.0', '@\x04\xff\x00\x00\x00'))
for input, output in addresses:
a = rfc1155.IPAddress(input)
raw = a.encode()
b = rfc1155.Asn1Object().decode(raw)[0]
self.assertEquals(a,b)
pass
return
def test_objectid_length(self):
"""test length"""
input_a = [1,3,6,1,2,1,2,3,234,23,4,23,423,234,23423423,4234] # list
input_b = tuple(input_a) # tuple
input_c = '.'.join( [ str(x) for x in input_a] ) # string no leading dot
input_d = '.' + input_c # string leading dot
a = rfc1155.ObjectID(input_a)
b = rfc1155.ObjectID(input_b)
c = rfc1155.ObjectID(input_c)
d = rfc1155.ObjectID(input_d)
e = rfc1155.ObjectID('.1.3')
self.assertEquals(len(a), len(input_a))
self.assertEquals(len(b), len(input_a))
self.assertEquals(len(c), len(input_a))
self.assertEquals(len(d), len(input_a))
self.assertNotEquals(len(b), len(e))
return
pass
if __name__ == '__main__':
unittest.main()
| 32.416058 | 83 | 0.563837 |
import unittest
import logging
import string
import sys
sys.path.append('../lib')
from libsnmp import util
from libsnmp import debug
from libsnmp import rfc1155
class EncoderTest(unittest.TestCase):
def setUp(self):
self.log = logging.getLogger('EncoderTest')
self.log.setLevel(logging.DEBUG)
return
def tearDown(self):
logging.shutdown()
return
def test_objectid_equality(self):
input_a = [1,3,6,1,2,1,2,3,234,23,4,23,423,234,23423423,4234]
input_b = tuple(input_a)
input_c = '.'.join( [ str(x) for x in input_a] )
input_d = '.' + input_c
a = rfc1155.ObjectID(input_a)
b = rfc1155.ObjectID(input_b)
c = rfc1155.ObjectID(input_c)
d = rfc1155.ObjectID(input_d)
e = rfc1155.ObjectID('.1.3')
f = rfc1155.ObjectID().decode(a.encode())[0]
g = rfc1155.Asn1Object().decode(a.encode())[0]
self.assertEquals(a, a)
self.assertEquals(a, b)
self.assertEquals(a, c)
self.assertEquals(a, d)
self.assertNotEquals(a, e)
self.assertEquals(a, f)
self.assertEquals(a, g)
self.assertEquals(b, a)
self.assertEquals(b, b)
self.assertEquals(b, c)
self.assertEquals(b, d)
self.assertNotEquals(b, e)
self.assertEquals(b, f)
self.assertEquals(b, g)
pass
def test_integer(self):
a = rfc1155.Integer(0)
b = rfc1155.Integer(0x7FFFFFFF)
c = rfc1155.Integer(-1)
d = rfc1155.Integer(-0x7FFFFFF)
return
def test_ip_address(self):
addresses = (('0.0.0.0', '@\x04\x00\x00\x00\x00'),
('255.255.255.255', '@\x04\xff\xff\xff\xff'),
('1.2.3.4', '@\x04\x01\x02\x03\x04'),
('10.0.0.1', '@\x04\n\x00\x00\x01'),
('254.154.1.0', '@\x04\xfe\x9a\x01\x00'),
('0.0.0.1', '@\x04\x00\x00\x00\x01'),
('255.0.0.0', '@\x04\xff\x00\x00\x00'))
for input, output in addresses:
a = rfc1155.IPAddress(input)
raw = a.encode()
b = rfc1155.Asn1Object().decode(raw)[0]
self.assertEquals(a,b)
pass
return
def test_objectid_length(self):
input_a = [1,3,6,1,2,1,2,3,234,23,4,23,423,234,23423423,4234]
input_b = tuple(input_a)
input_c = '.'.join( [ str(x) for x in input_a] )
input_d = '.' + input_c
a = rfc1155.ObjectID(input_a)
b = rfc1155.ObjectID(input_b)
c = rfc1155.ObjectID(input_c)
d = rfc1155.ObjectID(input_d)
e = rfc1155.ObjectID('.1.3')
self.assertEquals(len(a), len(input_a))
self.assertEquals(len(b), len(input_a))
self.assertEquals(len(c), len(input_a))
self.assertEquals(len(d), len(input_a))
self.assertNotEquals(len(b), len(e))
return
pass
if __name__ == '__main__':
unittest.main()
| true | true |
f71f2c91abcfe2546945aacdf89dff8367f6537d | 261 | py | Python | python_exercises/01Lista_alunos/mostrar_alunos.py | Matheus-IT/lang-python-related | dd2e5d9b9f16d3838ba1670fdfcba1fa3fe305e9 | [
"MIT"
] | null | null | null | python_exercises/01Lista_alunos/mostrar_alunos.py | Matheus-IT/lang-python-related | dd2e5d9b9f16d3838ba1670fdfcba1fa3fe305e9 | [
"MIT"
] | null | null | null | python_exercises/01Lista_alunos/mostrar_alunos.py | Matheus-IT/lang-python-related | dd2e5d9b9f16d3838ba1670fdfcba1fa3fe305e9 | [
"MIT"
] | null | null | null | def mostrar(alunos):
print('='*25)
for cont in range(3):
print(f' {cont+1} aluno {alunos["nomes"][cont]}')
print(f' notas {alunos["1nota"][cont]:4.2f}, {alunos["2nota"][cont]:4.2f}, {alunos["3nota"][cont]:4.2f}')
print('='*25)
| 37.285714 | 114 | 0.54023 | def mostrar(alunos):
print('='*25)
for cont in range(3):
print(f' {cont+1} aluno {alunos["nomes"][cont]}')
print(f' notas {alunos["1nota"][cont]:4.2f}, {alunos["2nota"][cont]:4.2f}, {alunos["3nota"][cont]:4.2f}')
print('='*25)
| true | true |
f71f2caaa174ac105310b83de417f8e865ce00e6 | 991 | py | Python | 322CoinCange/CoinChange2.py | Easonyesheng/CodePractice | 91c8b09c278f5abb67e90f0096fc83bef975647b | [
"MIT"
] | null | null | null | 322CoinCange/CoinChange2.py | Easonyesheng/CodePractice | 91c8b09c278f5abb67e90f0096fc83bef975647b | [
"MIT"
] | null | null | null | 322CoinCange/CoinChange2.py | Easonyesheng/CodePractice | 91c8b09c278f5abb67e90f0096fc83bef975647b | [
"MIT"
] | null | null | null | """
Given coins of different denominations `coins` and a total amount `amount`, write a function to
compute the fewest number of coins needed to make up that amount. If the amount cannot be made
up by any combination of the coins, return -1.
Source: 力扣 (LeetCode)
Link: https://leetcode-cn.com/problems/coin-change
Copyright belongs to 领扣网络 (LeetCode). Commercial reuse requires official authorization;
non-commercial reuse must credit the source.
"""
# Memoization is used to handle the overlapping subproblems
# (effectively a form of pruning)
class Solution:
def coinChange(self,coins, amount: int):
        # memo table
memo = dict()
def dp(n):
            # check the memo first to avoid recomputing subproblems
if n in memo: return memo[n]
if n == 0: return 0
if n < 0: return -1
res = float('INF')
for coin in set(coins):
subproblem = dp(n - coin)
if subproblem == -1: continue
res = min(res, 1 + subproblem)
            # record the result in the memo
memo[n] = res if res != float('INF') else -1
return memo[n]
return dp(amount)
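    # Illustrative check (the standard LeetCode example):
    #   Solution().coinChange([1, 2, 5], 11) -> 3  (5 + 5 + 1)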
if __name__ == "__main__":
s = Solution()
coins = [186,419,83,408]
amount = 6249
# coins = [1,3,5]
# amount = 11
print(s.coinChange(coins,amount)) | 23.595238 | 82 | 0.526741 |
class Solution:
def coinChange(self,coins, amount: int):
memo = dict()
def dp(n):
if n in memo: return memo[n]
if n == 0: return 0
if n < 0: return -1
res = float('INF')
for coin in set(coins):
subproblem = dp(n - coin)
if subproblem == -1: continue
res = min(res, 1 + subproblem)
memo[n] = res if res != float('INF') else -1
return memo[n]
return dp(amount)
if __name__ == "__main__":
s = Solution()
coins = [186,419,83,408]
amount = 6249
print(s.coinChange(coins,amount)) | true | true |
f71f2df9d428bc57d6384865ba76147ba636e02e | 178 | py | Python | prvsnlib/tasks/hostname.py | acoomans/prvsn | af6b313c2e779ae4e3a9cdba0b1c3a1f4b4c085e | [
"BSD-2-Clause"
] | null | null | null | prvsnlib/tasks/hostname.py | acoomans/prvsn | af6b313c2e779ae4e3a9cdba0b1c3a1f4b4c085e | [
"BSD-2-Clause"
] | null | null | null | prvsnlib/tasks/hostname.py | acoomans/prvsn | af6b313c2e779ae4e3a9cdba0b1c3a1f4b4c085e | [
"BSD-2-Clause"
] | null | null | null | import logging
from prvsnlib.utils.run import Run
def hostname(name, secure=False):
logging.header('Hostname ' + name)
Run(['hostnamectl', 'set-hostname', name]).run()
| 22.25 | 52 | 0.702247 | import logging
from prvsnlib.utils.run import Run
def hostname(name, secure=False):
logging.header('Hostname ' + name)
Run(['hostnamectl', 'set-hostname', name]).run()
| true | true |
f71f2e88ba4935400a5e83c4221be1e001cabc5e | 2,826 | py | Python | reviewboard/reviews/tests/test_status_update.py | amalik2/reviewboard | 676aa2dce38ce619a74f2d4cb3cfae9bce21416e | [
"MIT"
] | 2 | 2020-06-19T14:57:49.000Z | 2020-06-19T15:17:40.000Z | reviewboard/reviews/tests/test_status_update.py | amalik2/reviewboard | 676aa2dce38ce619a74f2d4cb3cfae9bce21416e | [
"MIT"
] | 1 | 2019-08-03T01:48:33.000Z | 2019-08-03T01:48:33.000Z | reviewboard/reviews/tests/test_status_update.py | amalik2/reviewboard | 676aa2dce38ce619a74f2d4cb3cfae9bce21416e | [
"MIT"
] | null | null | null | """Unit tests for reviewboard.reviews.models.base_comment.StatusUpdate."""
from __future__ import unicode_literals
from django.contrib.auth.models import AnonymousUser, Permission, User
from djblets.testing.decorators import add_fixtures
from reviewboard.accounts.models import LocalSiteProfile
from reviewboard.testing import TestCase
class StatusUpdateTests(TestCase):
"""Unit tests for reviewboard.reviews.models.base_comment.StatusUpdate."""
fixtures = ['test_users']
def test_is_mutable_by_with_anonymous(self):
"""Testing StatusUpdate.is_mutable_by with anonymous user"""
review_request = self.create_review_request()
status_update = self.create_status_update(review_request)
self.assertFalse(status_update.is_mutable_by(AnonymousUser()))
def test_is_mutable_by_with_owner(self):
"""Testing StatusUpdate.is_mutable_by with owner"""
review_request = self.create_review_request()
status_update = self.create_status_update(review_request)
self.assertTrue(status_update.is_mutable_by(status_update.user))
def test_is_mutable_by_with_other_user(self):
"""Testing StatusUpdate.is_mutable_by with other user"""
other_user = User.objects.create(username='other-user')
review_request = self.create_review_request()
status_update = self.create_status_update(review_request)
self.assertFalse(status_update.is_mutable_by(other_user))
def test_is_mutable_by_with_other_user_and_can_change_status_perm(self):
"""Testing StatusUpdate.is_mutable_by with other user with
change_statusupdate permission
"""
other_user = User.objects.create(username='other-user')
other_user.user_permissions.add(
Permission.objects.get(codename='change_statusupdate'))
review_request = self.create_review_request()
status_update = self.create_status_update(review_request)
self.assertTrue(status_update.is_mutable_by(other_user))
@add_fixtures(['test_site'])
def test_is_mutable_by_with_other_user_with_perm_same_local_site(self):
"""Testing StatusUpdate.is_mutable_by with other user on same
LocalSite with change_statusupdate permission
"""
review_request = self.create_review_request(with_local_site=True)
status_update = self.create_status_update(review_request)
other_user = User.objects.create(username='other-user')
site = review_request.local_site
site.users.add(other_user)
site_profile = other_user.get_site_profile(site)
site_profile.permissions = {
'reviews.change_statusupdate': True,
}
site_profile.save(update_fields=('permissions',))
self.assertTrue(status_update.is_mutable_by(other_user))
| 39.25 | 78 | 0.7431 |
from __future__ import unicode_literals
from django.contrib.auth.models import AnonymousUser, Permission, User
from djblets.testing.decorators import add_fixtures
from reviewboard.accounts.models import LocalSiteProfile
from reviewboard.testing import TestCase
class StatusUpdateTests(TestCase):
fixtures = ['test_users']
def test_is_mutable_by_with_anonymous(self):
review_request = self.create_review_request()
status_update = self.create_status_update(review_request)
self.assertFalse(status_update.is_mutable_by(AnonymousUser()))
def test_is_mutable_by_with_owner(self):
review_request = self.create_review_request()
status_update = self.create_status_update(review_request)
self.assertTrue(status_update.is_mutable_by(status_update.user))
def test_is_mutable_by_with_other_user(self):
other_user = User.objects.create(username='other-user')
review_request = self.create_review_request()
status_update = self.create_status_update(review_request)
self.assertFalse(status_update.is_mutable_by(other_user))
def test_is_mutable_by_with_other_user_and_can_change_status_perm(self):
other_user = User.objects.create(username='other-user')
other_user.user_permissions.add(
Permission.objects.get(codename='change_statusupdate'))
review_request = self.create_review_request()
status_update = self.create_status_update(review_request)
self.assertTrue(status_update.is_mutable_by(other_user))
@add_fixtures(['test_site'])
def test_is_mutable_by_with_other_user_with_perm_same_local_site(self):
review_request = self.create_review_request(with_local_site=True)
status_update = self.create_status_update(review_request)
other_user = User.objects.create(username='other-user')
site = review_request.local_site
site.users.add(other_user)
site_profile = other_user.get_site_profile(site)
site_profile.permissions = {
'reviews.change_statusupdate': True,
}
site_profile.save(update_fields=('permissions',))
self.assertTrue(status_update.is_mutable_by(other_user))
| true | true |
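# Illustrative sketch only: the tests above suggest that a StatusUpdate is
# mutable by its owner, or by a user holding the reviews.change_statusupdate
# permission (site-wide or granted on the review request's LocalSite), and
# never by an anonymous user. This is an assumed reconstruction of that rule,
# not Review Board's actual implementation; the LocalSite-aware has_perm call
# and the review_request.local_site attribute are assumptions of the sketch.
def is_mutable_by_sketch(status_update, user):
    if not user.is_authenticated:
        return False
    if user == status_update.user:
        # the owner of the status update may always modify it
        return True
    # otherwise fall back to the change_statusupdate permission
    local_site = status_update.review_request.local_site
    return user.has_perm('reviews.change_statusupdate', local_site)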
f71f2eef58a8a1cbd7ca50a24cc800b485725414 | 458 | py | Python | docs/scripts/ex_sinews.py | natalia-rubio/py_grama | 968c1c0238d7165de3b1b96534791feacc4aa960 | [
"MIT"
] | 13 | 2020-02-24T16:51:51.000Z | 2022-03-30T18:56:55.000Z | docs/scripts/ex_sinews.py | natalia-rubio/py_grama | 968c1c0238d7165de3b1b96534791feacc4aa960 | [
"MIT"
] | 78 | 2019-12-30T19:13:21.000Z | 2022-02-23T18:17:54.000Z | docs/scripts/ex_sinews.py | natalia-rubio/py_grama | 968c1c0238d7165de3b1b96534791feacc4aa960 | [
"MIT"
] | 7 | 2020-10-19T17:49:25.000Z | 2021-08-15T20:46:52.000Z | import grama as gr
import pandas as pd
import matplotlib.pyplot as plt
from grama.models import make_cantilever_beam
md_beam = make_cantilever_beam()
md_beam >> \
gr.ev_sinews(n_density=50, n_sweeps=10, df_det="nom", skip=True) >> \
gr.pt_auto()
plt.savefig("../images/ex_beam_sinews_doe.png")
md_beam >> \
gr.ev_sinews(n_density=50, n_sweeps=10, df_det="nom", skip=False) >> \
gr.pt_auto()
plt.savefig("../images/ex_beam_sinews_res.png")
| 26.941176 | 74 | 0.722707 | import grama as gr
import pandas as pd
import matplotlib.pyplot as plt
from grama.models import make_cantilever_beam
md_beam = make_cantilever_beam()
md_beam >> \
gr.ev_sinews(n_density=50, n_sweeps=10, df_det="nom", skip=True) >> \
gr.pt_auto()
plt.savefig("../images/ex_beam_sinews_doe.png")
md_beam >> \
gr.ev_sinews(n_density=50, n_sweeps=10, df_det="nom", skip=False) >> \
gr.pt_auto()
plt.savefig("../images/ex_beam_sinews_res.png")
| true | true |
f71f325b5474aeccd5e07a92e013e1262655e374 | 1,320 | py | Python | tests/maps_tests/test_setitem.py | lycantropos/dendroid | 4315673ef52129909617225df6357416c56a84b3 | [
"MIT"
] | null | null | null | tests/maps_tests/test_setitem.py | lycantropos/dendroid | 4315673ef52129909617225df6357416c56a84b3 | [
"MIT"
] | 16 | 2019-11-02T10:44:20.000Z | 2020-09-21T15:22:29.000Z | tests/maps_tests/test_setitem.py | lycantropos/dendroid | 4315673ef52129909617225df6357416c56a84b3 | [
"MIT"
] | 1 | 2020-03-13T08:41:39.000Z | 2020-03-13T08:41:39.000Z | from copy import copy
from typing import Tuple
from hypothesis import given
from dendroid.hints import Item
from tests.utils import (Map,
is_left_subtree_less_than_right_subtree,
to_height,
to_max_binary_tree_height,
to_min_binary_tree_height)
from . import strategies
@given(strategies.maps_with_items)
def test_properties(map_with_item: Tuple[Map, Item]) -> None:
map_, (key, value) = map_with_item
map_[key] = value
tree = map_.tree
assert len(map_) > 0
assert (max(0, to_min_binary_tree_height(tree))
<= to_height(tree)
<= to_max_binary_tree_height(tree))
assert is_left_subtree_less_than_right_subtree(tree)
@given(strategies.empty_maps_with_items)
def test_base_case(map_with_item: Tuple[Map, Item]) -> None:
map_, (key, value) = map_with_item
map_[key] = value
assert len(map_) == 1
assert key in map_
assert map_[key] is value
@given(strategies.non_empty_maps_with_items)
def test_step(map_with_item: Tuple[Map, Item]) -> None:
map_, (key, value) = map_with_item
original = copy(map_)
map_[key] = value
assert len(map_) == len(original) + (key not in original)
assert key in map_
assert map_[key] is value
| 26.4 | 65 | 0.665909 | from copy import copy
from typing import Tuple
from hypothesis import given
from dendroid.hints import Item
from tests.utils import (Map,
is_left_subtree_less_than_right_subtree,
to_height,
to_max_binary_tree_height,
to_min_binary_tree_height)
from . import strategies
@given(strategies.maps_with_items)
def test_properties(map_with_item: Tuple[Map, Item]) -> None:
map_, (key, value) = map_with_item
map_[key] = value
tree = map_.tree
assert len(map_) > 0
assert (max(0, to_min_binary_tree_height(tree))
<= to_height(tree)
<= to_max_binary_tree_height(tree))
assert is_left_subtree_less_than_right_subtree(tree)
@given(strategies.empty_maps_with_items)
def test_base_case(map_with_item: Tuple[Map, Item]) -> None:
map_, (key, value) = map_with_item
map_[key] = value
assert len(map_) == 1
assert key in map_
assert map_[key] is value
@given(strategies.non_empty_maps_with_items)
def test_step(map_with_item: Tuple[Map, Item]) -> None:
map_, (key, value) = map_with_item
original = copy(map_)
map_[key] = value
assert len(map_) == len(original) + (key not in original)
assert key in map_
assert map_[key] is value
| true | true |
f71f354e1d8b8af7917824080fcc689db368b6da | 3,775 | py | Python | forecaster/func.py | ahmed-f-alrefaie/forecaster | 25b73a533f6195f3e5c703730e63cb3e242c649a | [
"MIT"
] | null | null | null | forecaster/func.py | ahmed-f-alrefaie/forecaster | 25b73a533f6195f3e5c703730e63cb3e242c649a | [
"MIT"
] | null | null | null | forecaster/func.py | ahmed-f-alrefaie/forecaster | 25b73a533f6195f3e5c703730e63cb3e242c649a | [
"MIT"
] | null | null | null | import numpy as np
from scipy.stats import norm, truncnorm
from numpy.random import default_rng
### fix the number of different populations
n_pop = 4
def pick_random_hyper(all_hyper, sample_size=None):
rng = default_rng()
size = sample_size or all_hyper.shape[0]
return rng.choice(all_hyper, size=sample_size, replace=False)
def indicate(M, trans, i):
'''
indicate which M belongs to population i given transition parameter
'''
ts = np.insert(np.insert(trans, n_pop-1, np.inf), 0, -np.inf)
return (M>=ts[i]) & (M<ts[i+1])
def indicate_II(M, trans, i):
return (M>=trans[...,i]) & (M<trans[...,i+1])
def split_hyper_linear(hyper):
'''
split hyper and derive c
'''
c0, slope,sigma, trans = \
hyper[0], hyper[1:1+n_pop], hyper[1+n_pop:1+2*n_pop], hyper[1+2*n_pop:]
c = np.zeros_like(slope)
c[0] = c0
for i in range(1,n_pop):
c[i] = c[i-1] + trans[i-1]*(slope[i-1]-slope[i])
return c, slope, sigma, trans
def split_hyper_linear_II(hyper):
'''
split hyper and derive c
'''
c0, slope,sigma, trans = \
hyper[...,0], hyper[...,1:1+n_pop], hyper[...,1+n_pop:1+2*n_pop], hyper[...,1+2*n_pop:]
c = np.zeros_like(slope)
c[...,0] = c0
for i in range(1,n_pop):
c[...,i] = c[...,i-1] + trans[...,i-1]*(slope[...,i-1]-slope[...,i])
trans = np.insert(np.insert(trans,n_pop-1,np.inf,axis=1), 0, -np.inf, axis=1)
return c, slope, sigma, trans
def piece_linear_II(hyper, M, prob_R):
c, slope, sigma, trans = split_hyper_linear_II(hyper)
M = M
R = np.zeros_like(M)
for i in range(n_pop):
ind = indicate_II(M, trans, i)
mu = c[...,i]
mu[ind] += M[ind]*slope[ind,i]
R[ind] = norm.ppf(prob_R[ind],mu[ind],sigma[ind,i])
return R
def generate_mass(mean, std, sample_size):
mlower = 3e-4
mupper = 3e5
return truncnorm.rvs( (mlower-mean)/std, (mupper-mean)/std, loc=mean, scale=std, size=sample_size)
def piece_linear(hyper, M, prob_R):
'''
model: straight line
'''
M = np.array(M)
c, slope, sigma, trans = split_hyper_linear(hyper)
R = np.zeros_like(M)
for i in range(4):
ind = indicate(M, trans, i)
mu = c[i] + M[ind]*slope[i]
R[ind] = norm.ppf(prob_R[ind], mu, sigma[i])
return R
def ProbRGivenM(radii, M, hyper):
'''
p(radii|M)
'''
c, slope, sigma, trans = split_hyper_linear(hyper)
prob = np.zeros_like(M)
#print('SHAPE', prob.shape, M.shape, slope.shape)
for i in range(4):
ind = indicate(M, trans, i)
#print('MSHAPE',M[ind].shape)
mu = c[i] + M[ind]*slope[i]
#print('EXPECTED',mu)
sig = sigma[i]
prob[ind] = norm.pdf(radii, mu, sig)
prob = prob/np.sum(prob)
return prob
def ProbRGivenM_II(radii, M, hyper):
c, slope, sigma, trans = split_hyper_linear_II(hyper)
# 10, 100
prob = np.zeros(shape=(radii.shape[0], M.shape[0]))
mu = np.zeros_like(prob)
for i in range(n_pop):
mu[...] = 0.0
ind = indicate_II(M[None,...], trans[:,None,:], i)
radii_id,mass_id = np.where(ind)
#
mu[radii_id, mass_id] = c[radii_id,i] + slope[radii_id,i]*M[mass_id]#M[None,...]*slope[:,None,i][ind]
#print(mu[0])
prob[ind] = norm.pdf(radii[radii_id],mu[radii_id, mass_id],sigma[radii_id,i])
#print('C',c[:,None,i])
return (prob/np.sum(prob, axis=1)[:,None])
def random_choice_2d(arr, probs):
idx = (probs.cumsum(1) > np.random.rand(probs.shape[0])[:,None]).argmax(1)
return arr[idx]
def classification( logm, trans ):
'''
classify as four worlds
'''
count = np.zeros(4)
sample_size = len(logm)
ts = np.insert(np.insert(trans, n_pop-1, np.inf), 0, -np.inf)
for iclass in range(4):
ind = indicate_II( logm, ts, iclass)
count[iclass] = count[iclass] + ind.sum()
prob = count / np.sum(count) * 100.
print ('Terran %(T).1f %%, Neptunian %(N).1f %%, Jovian %(J).1f %%, Star %(S).1f %%' \
% {'T': prob[0], 'N': prob[1], 'J': prob[2], 'S': prob[3]})
return None | 24.198718 | 103 | 0.633907 | import numpy as np
from scipy.stats import norm, truncnorm
from numpy.random import default_rng
n_pop = 4
def pick_random_hyper(all_hyper, sample_size=None):
    rng = default_rng()
size = sample_size or all_hyper.shape[0]
return rng.choice(all_hyper, size=sample_size, replace=False)
def indicate(M, trans, i):
ts = np.insert(np.insert(trans, n_pop-1, np.inf), 0, -np.inf)
return (M>=ts[i]) & (M<ts[i+1])
def indicate_II(M, trans, i):
return (M>=trans[...,i]) & (M<trans[...,i+1])
def split_hyper_linear(hyper):
c0, slope,sigma, trans = \
hyper[0], hyper[1:1+n_pop], hyper[1+n_pop:1+2*n_pop], hyper[1+2*n_pop:]
c = np.zeros_like(slope)
c[0] = c0
for i in range(1,n_pop):
c[i] = c[i-1] + trans[i-1]*(slope[i-1]-slope[i])
return c, slope, sigma, trans
def split_hyper_linear_II(hyper):
c0, slope,sigma, trans = \
hyper[...,0], hyper[...,1:1+n_pop], hyper[...,1+n_pop:1+2*n_pop], hyper[...,1+2*n_pop:]
c = np.zeros_like(slope)
c[...,0] = c0
for i in range(1,n_pop):
c[...,i] = c[...,i-1] + trans[...,i-1]*(slope[...,i-1]-slope[...,i])
trans = np.insert(np.insert(trans,n_pop-1,np.inf,axis=1), 0, -np.inf, axis=1)
return c, slope, sigma, trans
def piece_linear_II(hyper, M, prob_R):
c, slope, sigma, trans = split_hyper_linear_II(hyper)
M = M
R = np.zeros_like(M)
for i in range(n_pop):
ind = indicate_II(M, trans, i)
mu = c[...,i]
mu[ind] += M[ind]*slope[ind,i]
R[ind] = norm.ppf(prob_R[ind],mu[ind],sigma[ind,i])
return R
def generate_mass(mean, std, sample_size):
mlower = 3e-4
mupper = 3e5
return truncnorm.rvs( (mlower-mean)/std, (mupper-mean)/std, loc=mean, scale=std, size=sample_size)
def piece_linear(hyper, M, prob_R):
M = np.array(M)
c, slope, sigma, trans = split_hyper_linear(hyper)
R = np.zeros_like(M)
for i in range(4):
ind = indicate(M, trans, i)
mu = c[i] + M[ind]*slope[i]
R[ind] = norm.ppf(prob_R[ind], mu, sigma[i])
return R
def ProbRGivenM(radii, M, hyper):
c, slope, sigma, trans = split_hyper_linear(hyper)
prob = np.zeros_like(M)
for i in range(4):
ind = indicate(M, trans, i)
mu = c[i] + M[ind]*slope[i]
sig = sigma[i]
prob[ind] = norm.pdf(radii, mu, sig)
prob = prob/np.sum(prob)
return prob
def ProbRGivenM_II(radii, M, hyper):
c, slope, sigma, trans = split_hyper_linear_II(hyper)
prob = np.zeros(shape=(radii.shape[0], M.shape[0]))
mu = np.zeros_like(prob)
for i in range(n_pop):
mu[...] = 0.0
ind = indicate_II(M[None,...], trans[:,None,:], i)
radii_id,mass_id = np.where(ind)
mu[radii_id, mass_id] = c[radii_id,i] + slope[radii_id,i]*M[mass_id]
prob[ind] = norm.pdf(radii[radii_id],mu[radii_id, mass_id],sigma[radii_id,i])
return (prob/np.sum(prob, axis=1)[:,None])
def random_choice_2d(arr, probs):
idx = (probs.cumsum(1) > np.random.rand(probs.shape[0])[:,None]).argmax(1)
return arr[idx]
def classification( logm, trans ):
count = np.zeros(4)
sample_size = len(logm)
ts = np.insert(np.insert(trans, n_pop-1, np.inf), 0, -np.inf)
for iclass in range(4):
ind = indicate_II( logm, ts, iclass)
count[iclass] = count[iclass] + ind.sum()
prob = count / np.sum(count) * 100.
print ('Terran %(T).1f %%, Neptunian %(N).1f %%, Jovian %(J).1f %%, Star %(S).1f %%' \
% {'T': prob[0], 'N': prob[1], 'J': prob[2], 'S': prob[3]})
return None | true | true |
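# Usage sketch for the piecewise-linear mass-radius model above (illustrative
# only; assumes piece_linear, classification and n_pop from the module are in
# scope). The flat hyperparameter vector is laid out as
# [c0, 4 slopes, 4 sigmas, 3 transition points] in log10(mass); the numbers
# below are placeholders, not forecaster's fitted posterior values.
import numpy as np

hyper_example = np.array([
    0.0,                       # c0: intercept of the first segment
    0.28, 0.59, -0.04, 0.88,   # slopes of the four populations
    0.05, 0.15, 0.07, 0.05,    # intrinsic scatter of each population
    0.49, 2.04, 4.42,          # log10(mass) transitions between populations
])

log_mass = np.array([0.0, 1.5, 3.0])           # log10(M / M_Earth)
prob = np.random.uniform(size=log_mass.shape)  # quantiles for the scatter draw
log_radius = piece_linear(hyper_example, log_mass, prob)
classification(log_mass, hyper_example[1 + 2 * n_pop:])  # prints population fractions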
f71f35d6375c793ab6bf23096e821ed9afadb12a | 873 | py | Python | environments/robot_arm/maddux/objects/ball.py | callaunchpad/MOR | becd8a181312882dae3d3495a730e268183f803f | [
"MIT"
] | 1 | 2018-02-11T03:09:49.000Z | 2018-02-11T03:09:49.000Z | environments/robot_arm/maddux/objects/ball.py | callaunchpad/MOR | becd8a181312882dae3d3495a730e268183f803f | [
"MIT"
] | 2 | 2018-02-08T19:45:20.000Z | 2018-10-02T09:55:39.000Z | environments/robot_arm/maddux/objects/ball.py | callaunchpad/MOR | becd8a181312882dae3d3495a730e268183f803f | [
"MIT"
] | 2 | 2018-02-10T22:51:57.000Z | 2020-04-14T02:46:22.000Z | """
A ball object to throw.
"""
import numpy as np
from throwable import ThrowableObject
from ..plot import plot_sphere
class Ball(ThrowableObject):
def __init__(self, position, radius, target=False):
"""Ball object that can move, have a velocity, and hit objects
:param position: The position (x,y,z) of the center of the ball
:type position: numpy.ndarray
:param: radius: The radius of the ball
:type radius: int
:rtype: None
"""
self.radius = radius
ThrowableObject.__init__(self, position, target)
def plot(self, ax):
"""Plots the ball at its current location.
:param ax: Figure to plot on.
:type ax: matplotlib.axes
:returns: Matplotlib figure
:rtype: matplotlib.axes
"""
return plot_sphere(self.position, self.radius, ax)
| 24.942857 | 71 | 0.631157 | import numpy as np
from throwable import ThrowableObject
from ..plot import plot_sphere
class Ball(ThrowableObject):
def __init__(self, position, radius, target=False):
self.radius = radius
ThrowableObject.__init__(self, position, target)
def plot(self, ax):
return plot_sphere(self.position, self.radius, ax)
| true | true |
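# Usage sketch for the Ball object above (illustrative only; assumes the
# maddux package and matplotlib with 3-D axes are available).
import numpy as np
import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ball = Ball(np.array([1.0, 2.0, 3.0]), radius=0.15)  # (x, y, z) centre and radius
ball.plot(ax)   # draws the sphere at its current position
plt.show()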
f71f3605b18a86569f186808f947423814025998 | 1,000 | py | Python | setup.py | dimakarp1996/CulinaryApp | 4662da542fb22597fa185af53c39da61dcc4a560 | [
"MIT"
] | null | null | null | setup.py | dimakarp1996/CulinaryApp | 4662da542fb22597fa185af53c39da61dcc4a560 | [
"MIT"
] | 2 | 2017-12-11T07:39:09.000Z | 2017-12-18T10:53:46.000Z | setup.py | dimakarp1996/CulinaryApp | 4662da542fb22597fa185af53c39da61dcc4a560 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Setup script."""
from setuptools import setup
setup(
name="CulinaryApp",
version="0.0.0",
author="Dmitry Karpov, Andrej Lapushkin, Vyacheslav Trifonov",
author_email="dimakarp1996@yandex.ru",
url="https://github.com/dimakarp1996/CulinaryApp",
license="MIT",
packages=[
"CulinaryApp"
],
install_requires=[
"bs4",
"lxml",
"requests",
"pandas",
"python-Levenshtein",
],
setup_requires=[
"pytest-runner",
"pytest-pycodestyle",
"pytest-cov",
],
tests_require=[
"pytest",
"pycodestyle",
"mock",
"pandas"
],
classifiers=[
"Development Status :: 1 - Planning",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
],
entry_points={
'console_scripts': ['CulinaryApp=CulinaryApp.CulinaryApp:main'],
}
)
| 22.222222 | 72 | 0.56 |
from setuptools import setup
setup(
name="CulinaryApp",
version="0.0.0",
author="Dmitry Karpov, Andrej Lapushkin, Vyacheslav Trifonov",
author_email="dimakarp1996@yandex.ru",
url="https://github.com/dimakarp1996/CulinaryApp",
license="MIT",
packages=[
"CulinaryApp"
],
install_requires=[
"bs4",
"lxml",
"requests",
"pandas",
"python-Levenshtein",
],
setup_requires=[
"pytest-runner",
"pytest-pycodestyle",
"pytest-cov",
],
tests_require=[
"pytest",
"pycodestyle",
"mock",
"pandas"
],
classifiers=[
"Development Status :: 1 - Planning",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
],
entry_points={
'console_scripts': ['CulinaryApp=CulinaryApp.CulinaryApp:main'],
}
)
| true | true |
f71f363a2c9fff25aef36f7f45fa90b7cdbd5bda | 3,472 | py | Python | bindings/python/ensmallen/datasets/string/thermosiphosp1063.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 5 | 2021-02-17T00:44:45.000Z | 2021-08-09T16:41:47.000Z | bindings/python/ensmallen/datasets/string/thermosiphosp1063.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/thermosiphosp1063.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z | """
This file offers the methods to automatically retrieve the graph Thermosipho sp. 1063.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def ThermosiphoSp1063(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Thermosipho sp. 1063 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Thermosipho sp. 1063 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="ThermosiphoSp1063",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 33.066667 | 223 | 0.675979 | from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph
def ThermosiphoSp1063(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
return AutomaticallyRetrievedGraph(
graph_name="ThermosiphoSp1063",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| true | true |
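# Usage sketch for the retrieval helper above (illustrative only; assumes the
# package exposes this module as ensmallen.datasets.string and that a network
# connection is available for the first download).
from ensmallen.datasets.string import ThermosiphoSp1063

graph = ThermosiphoSp1063(directed=False, version="links.v11.5")
print(graph)  # textual report of the retrieved STRING graph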
f71f38c1c62e7b6318d6a1664ac6ee8c0936729a | 8,714 | py | Python | utils/transformsgpu.py | drkostas/SemiSeg-Contrastive | af6b133400368911ef77f401b7673894fe6aa05c | [
"Apache-2.0"
] | 43 | 2021-07-26T13:13:12.000Z | 2022-03-27T13:06:44.000Z | utils/transformsgpu.py | drkostas/SemiSeg-Contrastive | af6b133400368911ef77f401b7673894fe6aa05c | [
"Apache-2.0"
] | 5 | 2021-08-08T03:06:44.000Z | 2022-02-15T06:34:57.000Z | utils/transformsgpu.py | drkostas/SemiSeg-Contrastive | af6b133400368911ef77f401b7673894fe6aa05c | [
"Apache-2.0"
] | 7 | 2021-11-07T10:16:32.000Z | 2022-03-28T08:51:06.000Z | '''
Code taken from https://github.com/WilhelmT/ClassMix
Slightly modified
'''
import kornia
import torch
import random
import torch.nn as nn
def normalize_rgb(data, dataset):
"""
Args:
data: data to normalize BxCxWxH
dataset: name of the dataset to normalize
Returns:
normalized data as (x-mean)/255
"""
if dataset == 'pascal_voc':
mean = (122.6789143, 116.66876762, 104.00698793) # rgb
elif dataset == 'cityscapes':
mean = (73.15835921, 82.90891754, 72.39239876) # rgb
else:
mean = (127.5, 127.5, 127.5 )
mean = torch.Tensor(mean).unsqueeze(0).unsqueeze(2).unsqueeze(3).cuda()
data_norm = ((data-mean)/255.0)
return data_norm
def normalize_bgr(data, dataset):
"""
Args:
data: data to normalize BxCxWxH
dataset: name of the dataset to normalize
Returns:
normalized data as (x-mean)/255
"""
if dataset == 'pascal_voc':
mean = (104.00698793, 116.66876762, 122.6789143) # bgr
elif dataset == 'cityscapes':
mean = (72.39239876, 82.90891754, 73.15835921) # bgr
else:
mean = (127.5, 127.5, 127.5 )
mean = torch.Tensor(mean).unsqueeze(0).unsqueeze(2).unsqueeze(3).cuda()
data_norm = ((data-mean)/255.0)
return data_norm
def grayscale(grayscale, data = None, target = None, probs = None):
"""
Args:
grayscale: boolean whether to apply grayscale augmentation
data: input data to augment BxCxWxH
target: labels to augment BxWxH
probs: probability masks to augment BxCxWxH
Returns:
data is converted from rgb to grayscale if [grayscale] is True
target and probs are also returned with no modifications applied
"""
if not (data is None):
if grayscale and data.shape[1]==3:
seq = nn.Sequential(kornia.augmentation.RandomGrayscale(p=1.) )
data = seq(data)
return data, target, probs
def colorJitter(colorJitter, data = None, target = None, s=0.1, probs = None):
"""
Args:
colorJitter: boolean whether to apply colorJitter augmentation
data: input data to augment BxCxWxH
target: labels to augment BxWxH
probs: probability masks to augment BxCxWxH
s: brightness and contrast strength of the color jitter
Returns:
colorJitter is applied to data if [colorJitter] is True
target and probs are also returned with no modifications applied
"""
if not (data is None):
if colorJitter and data.shape[1]==3:
seq = nn.Sequential(kornia.augmentation.ColorJitter(brightness=s,contrast=s,saturation=s/2.,hue=s/3.))
data = seq(data/255.)*255. # assumes [0,1]
return data, target, probs
def gaussian_blur(blur, data = None, target = None, min_sigma=0.2, max_sigma=3, probs = None):
"""
Args:
blur: boolean whether to apply blur
data: input data to augment BxCxWxH
target: labels to augment BxWxH
probs: probability masks to augment BxCxWxH
min_sigma: minimum sigma value for the gaussian blur
max_sigma: maximum sigma value for the gaussian blur
Returns:
gaussian blur is applied to data if [blur] is True
target and probs are also returned with no modifications applied
"""
if not (data is None):
if blur and data.shape[1]==3:
seq = nn.Sequential(kornia.filters.GaussianBlur2d(kernel_size=(23, 23), sigma=(min_sigma, max_sigma)))
data = seq(data)
return data, target, probs
def flip(flip, data = None, target = None, probs = None):
"""
Args:
flip: boolean whether to apply flip augmentation
data: input data to augment BxCxWxH
target: labels to augment BxWxH
probs: probability masks to augment BxCxWxH
Returns:
data, target and probs are flipped if the boolean flip is True
"""
if flip:
if not (data is None): data = torch.flip(data,(3,))
if not (target is None):
target = torch.flip(target,(2,))
if not (probs is None):
probs = torch.flip(probs,(2,))
return data, target, probs
def solarize(solarize, data = None, target = None, probs = None):
"""
Args:
solarize: boolean whether to apply solarize augmentation
data: input data to augment BxCxWxH
target: labels to augment BxWxH
probs: probability masks to augment BxCxWxH
Returns:
data, target, probs, where
data is solarized if [solarize] is True
"""
if not (data is None):
if solarize and data.shape[1]==3:
seq = nn.Sequential(kornia.augmentation.RandomSolarize((0, 1)))
data = seq(data.cpu()/255.).cuda()*255.
return data, target, probs
def mix(mask, data = None, target = None, probs = None):
"""
Applies classMix augmentation:
https://openaccess.thecvf.com/content/WACV2021/papers/Olsson_ClassMix_Segmentation-Based_Data_Augmentation_for_Semi-Supervised_Learning_WACV_2021_paper.pdf
Args:
mask: masks for applying ClassMix. A list of B elements of CxWxH tensors
data: input data to augment BxCxWxH
target: labels to augment BxWxH
probs: probability masks to augment BxCxWxH
Returns:
data, target and probs augmented with classMix
"""
if not (data is None):
if mask.shape[0] == data.shape[0]:
data = torch.cat([((1 - mask[(i + 1) % data.shape[0]]) * data[i] + mask[(i + 1) % data.shape[0]] * data[(i + 1) % data.shape[0]]).unsqueeze(0) for i in range(data.shape[0])])
if not (target is None):
target = torch.cat([((1 - mask[(i + 1) % data.shape[0]]) * target[i] + mask[(i + 1) % data.shape[0]] * target[(i + 1) % target.shape[0]]).unsqueeze(0) for i in range(target.shape[0])])
if not (probs is None):
probs = torch.cat([((1 - mask[(i + 1) % data.shape[0]]) * probs[i] + mask[(i + 1) % data.shape[0]] * probs[(i + 1) % probs.shape[0]]).unsqueeze(0) for i in range(probs.shape[0])])
return data, target, probs
def random_scale_crop(scale, data = None, target = None, ignore_label=255, probs = None):
"""
Args:
scale: scale ratio. Float
data: input data to augment BxCxWxH
target: labels to augment BxWxH
probs: probability masks to augment BxCxWxH
        ignore_label: integer value that defines the ignore class in the dataset's labels
    Returns:
        data, target and probs, after applying a scaling operation. The output resolution is preserved to match the input resolution WxH
"""
if scale != 1:
init_size_w = data.shape[2]
init_size_h = data.shape[3]
# scale data, labels and probs
data = nn.functional.interpolate(data, scale_factor=scale, mode='bilinear', align_corners=True, recompute_scale_factor=True)
if target is not None:
target = nn.functional.interpolate(target.unsqueeze(1).float(), scale_factor=scale, mode='nearest', recompute_scale_factor=True).long().squeeze(1)
if probs is not None:
probs = nn.functional.interpolate(probs.unsqueeze(1), scale_factor=scale, mode='bilinear', align_corners=True, recompute_scale_factor=True).squeeze(1)
final_size_w = data.shape[2]
final_size_h = data.shape[3]
diff_h = init_size_h - final_size_h
diff_w = init_size_w - final_size_w
if scale < 1: # add padding if needed
if diff_h % 2 == 1:
pad = nn.ConstantPad2d((diff_w//2, diff_w//2+1, diff_h//2+1, diff_h//2), 0)
else:
pad = nn.ConstantPad2d((diff_w//2, diff_w//2, diff_h//2, diff_h//2), 0)
data = pad(data)
if probs is not None:
probs = pad(probs)
# padding with ignore label to add to labels
if diff_h % 2 == 1:
pad = nn.ConstantPad2d((diff_w//2, diff_w//2+1, diff_h//2+1, diff_h//2), ignore_label)
else:
pad = nn.ConstantPad2d((diff_w//2, diff_w//2, diff_h//2, diff_h//2), ignore_label)
if target is not None:
target = pad(target)
else: # crop if needed
w = random.randint(0, data.shape[2] - init_size_w)
h = random.randint(0, data.shape[3] - init_size_h)
data = data [:,:,h:h+init_size_h,w:w + init_size_w]
if probs is not None:
probs = probs [:,h:h+init_size_h,w:w + init_size_w]
if target is not None:
target = target [:,h:h+init_size_h,w:w + init_size_w]
return data, target, probs
| 34.442688 | 192 | 0.61843 |
import kornia
import torch
import random
import torch.nn as nn
def normalize_rgb(data, dataset):
if dataset == 'pascal_voc':
mean = (122.6789143, 116.66876762, 104.00698793)
elif dataset == 'cityscapes':
mean = (73.15835921, 82.90891754, 72.39239876)
else:
mean = (127.5, 127.5, 127.5 )
mean = torch.Tensor(mean).unsqueeze(0).unsqueeze(2).unsqueeze(3).cuda()
data_norm = ((data-mean)/255.0)
return data_norm
def normalize_bgr(data, dataset):
if dataset == 'pascal_voc':
mean = (104.00698793, 116.66876762, 122.6789143)
elif dataset == 'cityscapes':
mean = (72.39239876, 82.90891754, 73.15835921)
else:
mean = (127.5, 127.5, 127.5 )
mean = torch.Tensor(mean).unsqueeze(0).unsqueeze(2).unsqueeze(3).cuda()
data_norm = ((data-mean)/255.0)
return data_norm
def grayscale(grayscale, data = None, target = None, probs = None):
if not (data is None):
if grayscale and data.shape[1]==3:
seq = nn.Sequential(kornia.augmentation.RandomGrayscale(p=1.) )
data = seq(data)
return data, target, probs
def colorJitter(colorJitter, data = None, target = None, s=0.1, probs = None):
if not (data is None):
if colorJitter and data.shape[1]==3:
seq = nn.Sequential(kornia.augmentation.ColorJitter(brightness=s,contrast=s,saturation=s/2.,hue=s/3.))
data = seq(data/255.)*255.
return data, target, probs
def gaussian_blur(blur, data = None, target = None, min_sigma=0.2, max_sigma=3, probs = None):
if not (data is None):
if blur and data.shape[1]==3:
seq = nn.Sequential(kornia.filters.GaussianBlur2d(kernel_size=(23, 23), sigma=(min_sigma, max_sigma)))
data = seq(data)
return data, target, probs
def flip(flip, data = None, target = None, probs = None):
if flip:
if not (data is None): data = torch.flip(data,(3,))
if not (target is None):
target = torch.flip(target,(2,))
if not (probs is None):
probs = torch.flip(probs,(2,))
return data, target, probs
def solarize(solarize, data = None, target = None, probs = None):
if not (data is None):
if solarize and data.shape[1]==3:
seq = nn.Sequential(kornia.augmentation.RandomSolarize((0, 1)))
data = seq(data.cpu()/255.).cuda()*255.
return data, target, probs
def mix(mask, data = None, target = None, probs = None):
if not (data is None):
if mask.shape[0] == data.shape[0]:
data = torch.cat([((1 - mask[(i + 1) % data.shape[0]]) * data[i] + mask[(i + 1) % data.shape[0]] * data[(i + 1) % data.shape[0]]).unsqueeze(0) for i in range(data.shape[0])])
if not (target is None):
target = torch.cat([((1 - mask[(i + 1) % data.shape[0]]) * target[i] + mask[(i + 1) % data.shape[0]] * target[(i + 1) % target.shape[0]]).unsqueeze(0) for i in range(target.shape[0])])
if not (probs is None):
probs = torch.cat([((1 - mask[(i + 1) % data.shape[0]]) * probs[i] + mask[(i + 1) % data.shape[0]] * probs[(i + 1) % probs.shape[0]]).unsqueeze(0) for i in range(probs.shape[0])])
return data, target, probs
def random_scale_crop(scale, data = None, target = None, ignore_label=255, probs = None):
if scale != 1:
init_size_w = data.shape[2]
init_size_h = data.shape[3]
data = nn.functional.interpolate(data, scale_factor=scale, mode='bilinear', align_corners=True, recompute_scale_factor=True)
if target is not None:
target = nn.functional.interpolate(target.unsqueeze(1).float(), scale_factor=scale, mode='nearest', recompute_scale_factor=True).long().squeeze(1)
if probs is not None:
probs = nn.functional.interpolate(probs.unsqueeze(1), scale_factor=scale, mode='bilinear', align_corners=True, recompute_scale_factor=True).squeeze(1)
final_size_w = data.shape[2]
final_size_h = data.shape[3]
diff_h = init_size_h - final_size_h
diff_w = init_size_w - final_size_w
if scale < 1:
if diff_h % 2 == 1:
pad = nn.ConstantPad2d((diff_w//2, diff_w//2+1, diff_h//2+1, diff_h//2), 0)
else:
pad = nn.ConstantPad2d((diff_w//2, diff_w//2, diff_h//2, diff_h//2), 0)
data = pad(data)
if probs is not None:
probs = pad(probs)
if diff_h % 2 == 1:
pad = nn.ConstantPad2d((diff_w//2, diff_w//2+1, diff_h//2+1, diff_h//2), ignore_label)
else:
pad = nn.ConstantPad2d((diff_w//2, diff_w//2, diff_h//2, diff_h//2), ignore_label)
if target is not None:
target = pad(target)
else:
w = random.randint(0, data.shape[2] - init_size_w)
h = random.randint(0, data.shape[3] - init_size_h)
data = data [:,:,h:h+init_size_h,w:w + init_size_w]
if probs is not None:
probs = probs [:,h:h+init_size_h,w:w + init_size_w]
if target is not None:
target = target [:,h:h+init_size_h,w:w + init_size_w]
return data, target, probs
| true | true |
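# Usage sketch chaining a few of the augmentations above (illustrative only;
# assumes a CUDA device, since several helpers call .cuda() internally, and a
# fake Cityscapes-like batch with pixel values in the [0, 255] range).
import torch

batch = torch.rand(4, 3, 256, 256).cuda() * 255.       # B x C x W x H images
labels = torch.randint(0, 19, (4, 256, 256)).cuda()    # B x W x H label maps

data, target, _ = flip(True, data=batch, target=labels)
data, target, _ = colorJitter(True, data=data, target=target, s=0.2)
data, target, _ = gaussian_blur(True, data=data, target=target)
data, target, _ = random_scale_crop(0.8, data=data, target=target, ignore_label=255)
data = normalize_rgb(data, 'cityscapes')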
f71f38cfccdfb77ee615a237128b62e47663849d | 1,069 | py | Python | tests/test_miranda.py | dokimastis/miranda | 2fff074d828659b5fb6fa2de0de6d872d78f0f96 | [
"MIT"
] | null | null | null | tests/test_miranda.py | dokimastis/miranda | 2fff074d828659b5fb6fa2de0de6d872d78f0f96 | [
"MIT"
] | null | null | null | tests/test_miranda.py | dokimastis/miranda | 2fff074d828659b5fb6fa2de0de6d872d78f0f96 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_miranda
----------------------------------
Tests for `miranda` module.
"""
import pytest
from contextlib import contextmanager
from click.testing import CliRunner
from miranda import miranda
from miranda import cli
@pytest.fixture
def response():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
# import requests
# return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
"""Sample pytest test function with the pytest fixture as an argument.
"""
# from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
def test_command_line_interface():
runner = CliRunner()
result = runner.invoke(cli.main)
assert result.exit_code == 0
assert 'miranda.cli.main' in result.output
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.output
| 25.452381 | 78 | 0.687558 |
import pytest
from contextlib import contextmanager
from click.testing import CliRunner
from miranda import miranda
from miranda import cli
@pytest.fixture
def response():
def test_content(response):
def test_command_line_interface():
runner = CliRunner()
result = runner.invoke(cli.main)
assert result.exit_code == 0
assert 'miranda.cli.main' in result.output
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.output
| true | true |
f71f38f6115e04f53d84447035a3e9a73bd6c376 | 6,437 | py | Python | vbox/src/VBox/ValidationKit/testmanager/config.py | Nurzamal/rest_api_docker | a9cc01dfc235467d490d9663755b33ef6990bdd8 | [
"MIT"
] | null | null | null | vbox/src/VBox/ValidationKit/testmanager/config.py | Nurzamal/rest_api_docker | a9cc01dfc235467d490d9663755b33ef6990bdd8 | [
"MIT"
] | null | null | null | vbox/src/VBox/ValidationKit/testmanager/config.py | Nurzamal/rest_api_docker | a9cc01dfc235467d490d9663755b33ef6990bdd8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# $Id: config.py 69111 2017-10-17 14:26:02Z vboxsync $
"""
Test Manager Configuration.
"""
__copyright__ = \
"""
Copyright (C) 2012-2017 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 69111 $"
import os;
## Test Manager version string.
g_ksVersion = 'v0.1.0';
## Test Manager revision string.
g_ksRevision = ('$Revision: 69111 $')[11:-2];
## Enable VBox specific stuff.
g_kfVBoxSpecific = True;
## @name Used by the TMDatabaseConnection class.
# @{
g_ksDatabaseName = 'testmanager';
g_ksDatabaseAddress = None;
g_ksDatabasePort = None;
g_ksDatabaseUser = 'postgres';
g_ksDatabasePassword = '';
## @}
## @name User handling.
## @{
## Whether login names are case insensitive (True) or case sensitive (False).
## @note Implemented by inserting lower case names into DB and lower case
## bind variables in WHERE clauses.
g_kfLoginNameCaseInsensitive = True;
## @}
## @name File locations
## @{
## The TestManager directory.
g_ksTestManagerDir = os.path.dirname(os.path.abspath(__file__));
## The Validation Kit directory.
g_ksValidationKitDir = os.path.dirname(g_ksTestManagerDir);
## The TestManager htdoc directory.
g_ksTmHtDocDir = os.path.join(g_ksTestManagerDir, 'htdocs');
## The TestManager download directory (under htdoc somewhere), for validationkit zips.
g_ksTmDownloadDir = os.path.join(g_ksTmHtDocDir, 'download');
## The base URL relative path of the TM download directory (g_ksTmDownloadDir).
g_ksTmDownloadBaseUrlRel = 'htdocs/downloads';
## The root of the file area (referred to as TM_FILE_DIR in database docs).
g_ksFileAreaRootDir = '/var/tmp/testmanager'
## The root of the file area with the zip files (best put on a big storage server).
g_ksZipFileAreaRootDir = '/var/tmp/testmanager2'
## URL prefix for trac log viewer.
g_ksTracLogUrlPrefix = 'https://linserv.de.oracle.com/vbox/log/'
## URL prefix for trac log viewer.
g_ksTracChangsetUrlFmt = 'https://linserv.de.oracle.com/%(sRepository)s/changeset/%(iRevision)s'
## URL prefix for unprefixed build logs.
g_ksBuildLogUrlPrefix = ''
## URL prefix for unprefixed build binaries.
g_ksBuildBinUrlPrefix = '/builds/'
## The local path prefix for unprefixed build binaries. (Host file system, not web server.)
g_ksBuildBinRootDir = '/mnt/builds/'
## File on the build binary share that can be used to check that it's mounted.
g_ksBuildBinRootFile = 'builds.txt'
## @}
## @name Scheduling parameters
## @{
## The time to wait for a gang to gather (in seconds).
g_kcSecGangGathering = 600;
## The max time allowed to spend looking for a new task (in seconds).
g_kcSecMaxNewTask = 60;
## Minimum time since last task started.
g_kcSecMinSinceLastTask = 120; # (2 min)
## Minimum time since last failed task.
g_kcSecMinSinceLastFailedTask = 180; # (3 min)
## @}
## @name Test result limits.
## In general, we will fail the test when reached and stop accepting further results.
## @{
## The max number of test results per test set.
g_kcMaxTestResultsPerTS = 4096;
## The max number of test results (children) per test result.
g_kcMaxTestResultsPerTR = 512;
## The max number of test result values per test set.
g_kcMaxTestValuesPerTS = 4096;
## The max number of test result values per test result.
g_kcMaxTestValuesPerTR = 256;
## The max number of test result message per test result.
g_kcMaxTestMsgsPerTR = 4;
## The max test result nesting depth.
g_kcMaxTestResultDepth = 10;
## The max length of a test result name.
g_kcchMaxTestResultName = 64;
## The max length of a test result value name.
g_kcchMaxTestValueName = 56;
## The max length of a test result message.
g_kcchMaxTestMsg = 128;
## The max size of the main log file.
g_kcMbMaxMainLog = 32;
## The max size of an uploaded file (individual).
g_kcMbMaxUploadSingle = 150;
## The max size of all uploaded file.
g_kcMbMaxUploadTotal = 200;
## The max number of files that can be uploaded.
g_kcMaxUploads = 256;
## @}
## @name Debug Features
## @{
## Enables extra DB exception information.
g_kfDebugDbXcpt = True;
## Where to write the glue debug.
# None indicates apache error log, string indicates a file.
#g_ksSrcGlueDebugLogDst = '/tmp/testmanager-srv-glue.log';
g_ksSrcGlueDebugLogDst = None;
## Whether to enable CGI trace back in the server glue.
g_kfSrvGlueCgiTb = False;
## Enables glue debug output.
g_kfSrvGlueDebug = False;
## Timestamp and pid prefix the glue debug output.
g_kfSrvGlueDebugTS = True;
## Enables task scheduler debug output to g_ksSrcGlueDebugLogDst.
g_kfSrvGlueDebugScheduler = False;
## Enables the SQL trace back.
g_kfWebUiSqlTrace = False;
## Enables the explain in the SQL trace back.
g_kfWebUiSqlTraceExplain = False;
## Whether the postgresql version supports the TIMING option on EXPLAIN (>= 9.2).
g_kfWebUiSqlTraceExplainTiming = False;
## Display time spent processing the page.
g_kfWebUiProcessedIn = True;
## Enables WebUI debug output.
g_kfWebUiDebug = False;
## Enables WebUI SQL debug output print() calls (requires g_kfWebUiDebug).
g_kfWebUiSqlDebug = False;
## Enables the debug panel at the bottom of the page.
g_kfWebUiDebugPanel = True;
## Profile cgi/admin.py.
g_kfProfileAdmin = False;
## Profile cgi/index.py.
g_kfProfileIndex = False;
## When not None,
g_ksTestBoxDispXpctLog = '/tmp/testmanager-testboxdisp-xcpt.log'
## @}
| 34.239362 | 97 | 0.722697 |
__copyright__ = \
"""
Copyright (C) 2012-2017 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 69111 $"
import os;
g_ksVersion = 'v0.1.0';
g_ksRevision = ('$Revision: 69111 $')[11:-2];
g_kfVBoxSpecific = True;
g_ksDatabaseName = 'testmanager';
g_ksDatabaseAddress = None;
g_ksDatabasePort = None;
g_ksDatabaseUser = 'postgres';
g_ksDatabasePassword = '';
## The max time allowed to spend looking for a new task (in seconds).
g_kcSecMaxNewTask = 60;
## Minimum time since last task started.
g_kcSecMinSinceLastTask = 120; # (2 min)
## Minimum time since last failed task.
g_kcSecMinSinceLastFailedTask = 180; # (3 min)
## @}
## @name Test result limits.
## In general, we will fail the test when reached and stop accepting further results.
## @{
## The max number of test results per test set.
g_kcMaxTestResultsPerTS = 4096;
## The max number of test results (children) per test result.
g_kcMaxTestResultsPerTR = 512;
## The max number of test result values per test set.
g_kcMaxTestValuesPerTS = 4096;
## The max number of test result values per test result.
g_kcMaxTestValuesPerTR = 256;
## The max number of test result message per test result.
g_kcMaxTestMsgsPerTR = 4;
## The max test result nesting depth.
g_kcMaxTestResultDepth = 10;
## The max length of a test result name.
g_kcchMaxTestResultName = 64;
## The max length of a test result value name.
g_kcchMaxTestValueName = 56;
## The max length of a test result message.
g_kcchMaxTestMsg = 128;
## The max size of the main log file.
g_kcMbMaxMainLog = 32;
## The max size of an uploaded file (individual).
g_kcMbMaxUploadSingle = 150;
## The max size of all uploaded file.
g_kcMbMaxUploadTotal = 200;
## The max number of files that can be uploaded.
g_kcMaxUploads = 256;
## @}
## @name Debug Features
## @{
## Enables extra DB exception information.
g_kfDebugDbXcpt = True;
## Where to write the glue debug.
# None indicates apache error log, string indicates a file.
#g_ksSrcGlueDebugLogDst = '/tmp/testmanager-srv-glue.log';
g_ksSrcGlueDebugLogDst = None;
## Whether to enable CGI trace back in the server glue.
g_kfSrvGlueCgiTb = False;
## Enables glue debug output.
g_kfSrvGlueDebug = False;
## Timestamp and pid prefix the glue debug output.
g_kfSrvGlueDebugTS = True;
## Enables task scheduler debug output to g_ksSrcGlueDebugLogDst.
g_kfSrvGlueDebugScheduler = False;
## Enables the SQL trace back.
g_kfWebUiSqlTrace = False;
## Enables the explain in the SQL trace back.
g_kfWebUiSqlTraceExplain = False;
## Whether the postgresql version supports the TIMING option on EXPLAIN (>= 9.2).
g_kfWebUiSqlTraceExplainTiming = False;
## Display time spent processing the page.
g_kfWebUiProcessedIn = True;
## Enables WebUI debug output.
g_kfWebUiDebug = False;
## Enables WebUI SQL debug output print() calls (requires g_kfWebUiDebug).
g_kfWebUiSqlDebug = False;
## Enables the debug panel at the bottom of the page.
g_kfWebUiDebugPanel = True;
## Profile cgi/admin.py.
g_kfProfileAdmin = False;
## Profile cgi/index.py.
g_kfProfileIndex = False;
## When not None,
g_ksTestBoxDispXpctLog = '/tmp/testmanager-testboxdisp-xcpt.log'
## @}
| true | true |
f71f391105b1c350cf8efd3610d161a083bb9149 | 4,283 | py | Python | Public/assets/Python/data_fetch.py | VictoriaGuXY/MCO-Menu-Checker-Online | 706e2e1bf7395cc344f382ea2ac53d964d459f86 | [
"MIT"
] | null | null | null | Public/assets/Python/data_fetch.py | VictoriaGuXY/MCO-Menu-Checker-Online | 706e2e1bf7395cc344f382ea2ac53d964d459f86 | [
"MIT"
] | null | null | null | Public/assets/Python/data_fetch.py | VictoriaGuXY/MCO-Menu-Checker-Online | 706e2e1bf7395cc344f382ea2ac53d964d459f86 | [
"MIT"
] | null | null | null | import sys
import json
import os
from lxml import html
from datetime import datetime
import requests
from to_json import to_dict
class Crawler:
'''
The crawler class used for retrieving information from sodexo's menu page
Note:
        Blitman Commons is not yet included in Sodexo's page. The class should throw an error if blm is requested.
    Attributes:
        url (str): link to the corresponding menu page of the target dining hall
'''
def __init__(self, target):
if target.lower() == 'cms':
self.url = 'https://menus.sodexomyway.com/BiteMenu/Menu?menuId=15465&locationId=76929001&whereami=http:' \
'//rensselaerdining.com/dining-near-me/commons-dining-hall'
elif target.lower() == 'sage':
self.url = 'https://menus.sodexomyway.com/BiteMenu/Menu?menuId=15285&locationId=76929002&whereami=http:' \
'//rensselaerdining.com/dining-near-me/russell-sage'
elif target.lower() == 'barh':
self.url = 'https://menus.sodexomyway.com/BiteMenu/Menu?menuId=667&locationId=76929003&whereami=http:' \
'//rensselaerdining.com/dining-near-me/barh-dining-hall'
elif target.lower() == 'blm':
raise ValueError(f'Blitman Commons is currently not on Sodexo\'s official website')
else:
raise ValueError(f'Target dinning hall ({target}) is not valid')
self.target = target
def crawl(self):
'''
        Uses requests to fetch the HTML source and lxml.html to build the element tree.
'''
tree = html.fromstring(requests.get(self.url).content)
# current date
date = int(str(datetime.today()).split()[0].split('-')[-1])
breakfast = get_dish_and_cal('breakfast', tree, date)
lunch = get_dish_and_cal('lunch', tree, date)
dinner = get_dish_and_cal('dinner', tree, date)
return breakfast, lunch, dinner
def get_dish_and_cal(time, e_tree, date):
dishes = clean_up(e_tree.xpath('./body/div[@class="my-app"]/div/div[@class="bottom-half"]/div[@class="main-content"]/div[@id="bite-menu"]/div[@id="menuid-{0:d}-day"]/div[@class="accordion"]/div[contains(@class, "{1}")]/div[contains(@class, "accordion-panel")]/div[@class="bite-menu-item"]/div[@class="col-xs-9"]/a[contains(@class, "get-nutritioncalculator")]/text()'.format(date, time)))
cals = clean_up(e_tree.xpath('./body/div[@class="my-app"]/div/div[@class="bottom-half"]/div[@class="main-content"]/div[@id="bite-menu"]/div[@id="menuid-{0:d}-day"]/div[@class="accordion"]/div[contains(@class, "{1}")]/div[contains(@class, "accordion-panel")]/div[@class="bite-menu-item"]/div[contains(@class, "text-right")]/a/text()'.format(date, time))[1:])
return dishes, cals
def to_html(name, cal, tp, src="http://placehold.it/700x400", description=""):
html = \
"""
<div class="block {0}-block">
<div class="row">
<div class="col-lg-4 col-md-6 mb-4">
<div class="card h-100">
<a href="#"><img class="card-img-top" src="{1}" alt=""></a>
<div class="card-body">
<h4 class="card-title">
<a href="#">{2}</a>
</h4>
<h5>{3}</h5>
<p class="card-text">{4}</p>
</div>
<div class="card-footer">
<small class="text-muted">★ ★ ★ ★ ☆</small>
</div>
</div>
</div>
</div>
</div>
"""
return html.format(tp, src, name, cal, description)
def clean_up(vals):
# if type(vals) != type(list()):
# raise RuntimeError(f'clean up: Expected list, but was {type(vals)}')
result = list()
for item in vals:
# if type(vals) != type(str()):
# raise RuntimeError(f'clean up: entries inside vals are not string. Was {type(item)}')
result.append(str(item).replace('\r', '').strip())
return result
# driver function
def fetch_all(name):
result = Crawler(name).crawl()
return to_dict(result[0][0], result[0][1]), to_dict(result[1][0], result[1][1]), to_dict(result[2][0], result[2][1])
if __name__ == '__main__':
print(os.getcwd())
| 38.936364 | 391 | 0.596544 | import sys
import json
import os
from lxml import html
from datetime import datetime
import requests
from to_json import to_dict
class Crawler:
def __init__(self, target):
if target.lower() == 'cms':
self.url = 'https://menus.sodexomyway.com/BiteMenu/Menu?menuId=15465&locationId=76929001&whereami=http:' \
'//rensselaerdining.com/dining-near-me/commons-dining-hall'
elif target.lower() == 'sage':
self.url = 'https://menus.sodexomyway.com/BiteMenu/Menu?menuId=15285&locationId=76929002&whereami=http:' \
'//rensselaerdining.com/dining-near-me/russell-sage'
elif target.lower() == 'barh':
self.url = 'https://menus.sodexomyway.com/BiteMenu/Menu?menuId=667&locationId=76929003&whereami=http:' \
'//rensselaerdining.com/dining-near-me/barh-dining-hall'
elif target.lower() == 'blm':
raise ValueError(f'Blitman Commons is currently not on Sodexo\'s official website')
else:
raise ValueError(f'Target dinning hall ({target}) is not valid')
self.target = target
def crawl(self):
tree = html.fromstring(requests.get(self.url).content)
# current date
date = int(str(datetime.today()).split()[0].split('-')[-1])
breakfast = get_dish_and_cal('breakfast', tree, date)
lunch = get_dish_and_cal('lunch', tree, date)
dinner = get_dish_and_cal('dinner', tree, date)
return breakfast, lunch, dinner
def get_dish_and_cal(time, e_tree, date):
dishes = clean_up(e_tree.xpath('./body/div[@class="my-app"]/div/div[@class="bottom-half"]/div[@class="main-content"]/div[@id="bite-menu"]/div[@id="menuid-{0:d}-day"]/div[@class="accordion"]/div[contains(@class, "{1}")]/div[contains(@class, "accordion-panel")]/div[@class="bite-menu-item"]/div[@class="col-xs-9"]/a[contains(@class, "get-nutritioncalculator")]/text()'.format(date, time)))
cals = clean_up(e_tree.xpath('./body/div[@class="my-app"]/div/div[@class="bottom-half"]/div[@class="main-content"]/div[@id="bite-menu"]/div[@id="menuid-{0:d}-day"]/div[@class="accordion"]/div[contains(@class, "{1}")]/div[contains(@class, "accordion-panel")]/div[@class="bite-menu-item"]/div[contains(@class, "text-right")]/a/text()'.format(date, time))[1:])
return dishes, cals
def to_html(name, cal, tp, src="http://placehold.it/700x400", description=""):
html = \
"""
<div class="block {0}-block">
<div class="row">
<div class="col-lg-4 col-md-6 mb-4">
<div class="card h-100">
<a href="#"><img class="card-img-top" src="{1}" alt=""></a>
<div class="card-body">
<h4 class="card-title">
<a href="#">{2}</a>
</h4>
<h5>{3}</h5>
<p class="card-text">{4}</p>
</div>
<div class="card-footer">
<small class="text-muted">★ ★ ★ ★ ☆</small>
</div>
</div>
</div>
</div>
</div>
"""
return html.format(tp, src, name, cal, description)
def clean_up(vals):
# if type(vals) != type(list()):
# raise RuntimeError(f'clean up: Expected list, but was {type(vals)}')
result = list()
for item in vals:
# if type(vals) != type(str()):
# raise RuntimeError(f'clean up: entries inside vals are not string. Was {type(item)}')
result.append(str(item).replace('\r', '').strip())
return result
# driver function
def fetch_all(name):
result = Crawler(name).crawl()
return to_dict(result[0][0], result[0][1]), to_dict(result[1][0], result[1][1]), to_dict(result[2][0], result[2][1])
if __name__ == '__main__':
print(os.getcwd())
| true | true |
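# Usage sketch for the crawler above (illustrative only; needs network access
# to the Sodexo menu pages, and assumes to_dict returns a dish -> calories
# mapping). Valid targets are 'cms', 'sage' and 'barh'; 'blm' raises.
breakfast, lunch, dinner = fetch_all('cms')
for meal_name, meal in (('breakfast', breakfast), ('lunch', lunch), ('dinner', dinner)):
    print(meal_name, '-', len(meal), 'dishes')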
f71f395bbff397746135521fd61c58fd06d81c7d | 3,484 | py | Python | sdk/python/pulumi_azure_nextgen/eventgrid/v20190201preview/get_domain_topic.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/eventgrid/v20190201preview/get_domain_topic.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/eventgrid/v20190201preview/get_domain_topic.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetDomainTopicResult',
'AwaitableGetDomainTopicResult',
'get_domain_topic',
]
@pulumi.output_type
class GetDomainTopicResult:
"""
Domain Topic
"""
def __init__(__self__, id=None, name=None, provisioning_state=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified identifier of the resource
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Provisioning state of the domain topic.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> str:
"""
Type of the resource
"""
return pulumi.get(self, "type")
class AwaitableGetDomainTopicResult(GetDomainTopicResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDomainTopicResult(
id=self.id,
name=self.name,
provisioning_state=self.provisioning_state,
type=self.type)
def get_domain_topic(domain_name: Optional[str] = None,
domain_topic_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDomainTopicResult:
"""
Domain Topic
:param str domain_name: Name of the domain
:param str domain_topic_name: Name of the topic
:param str resource_group_name: The name of the resource group within the user's subscription.
"""
__args__ = dict()
__args__['domainName'] = domain_name
__args__['domainTopicName'] = domain_topic_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:eventgrid/v20190201preview:getDomainTopic', __args__, opts=opts, typ=GetDomainTopicResult).value
return AwaitableGetDomainTopicResult(
id=__ret__.id,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
type=__ret__.type)
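# Hedged usage sketch (not part of the generated SDK file); the resource names below
# are placeholders and a configured Pulumi program/stack is assumed:
# topic = get_domain_topic(domain_name="example-domain",
#                          domain_topic_name="example-topic",
#                          resource_group_name="example-rg")
# pulumi.export("provisioningState", topic.provisioning_state)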
| 32.259259 | 147 | 0.64667 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetDomainTopicResult',
'AwaitableGetDomainTopicResult',
'get_domain_topic',
]
@pulumi.output_type
class GetDomainTopicResult:
def __init__(__self__, id=None, name=None, provisioning_state=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
class AwaitableGetDomainTopicResult(GetDomainTopicResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDomainTopicResult(
id=self.id,
name=self.name,
provisioning_state=self.provisioning_state,
type=self.type)
def get_domain_topic(domain_name: Optional[str] = None,
domain_topic_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDomainTopicResult:
__args__ = dict()
__args__['domainName'] = domain_name
__args__['domainTopicName'] = domain_topic_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:eventgrid/v20190201preview:getDomainTopic', __args__, opts=opts, typ=GetDomainTopicResult).value
return AwaitableGetDomainTopicResult(
id=__ret__.id,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
type=__ret__.type)
| true | true |
f71f396c55224e1478137d13c7dd9bd416d11e5a | 1,722 | py | Python | utils/file.py | xia-deng/lawerWeb | 6d2fe3642b2b7fbdda568e3af240bbcf6fda6c48 | [
"Apache-2.0"
] | null | null | null | utils/file.py | xia-deng/lawerWeb | 6d2fe3642b2b7fbdda568e3af240bbcf6fda6c48 | [
"Apache-2.0"
] | null | null | null | utils/file.py | xia-deng/lawerWeb | 6d2fe3642b2b7fbdda568e3af240bbcf6fda6c48 | [
"Apache-2.0"
] | null | null | null | '''
File and directory helper class
'''
import os
import shutil
from utils.commonUtil import CommonUtil
class FileUtil:
'''
	Handle file paths
'''
@staticmethod
def cleanPath(path):
path=path.strip('\\');
return path
'''
	Check whether a path exists
'''
@staticmethod
def isExists(path):
return os.path.exists(path)
'''
	Create a directory
'''
@staticmethod
def mkdir(dir,isMany):
dir=''.join(dir);
CommonUtil.toString(dir)
dir=FileUtil.cleanPath(dir)
if(FileUtil.isExists(dir)):
return False
else:
try:
if(isMany==True):
os.makedirs(dir)
else:
os.mkdir(dir)
except:
return False
return True
'''
	Remove a directory
'''
@staticmethod
def removeDir(dir,isRemoveAll):
try:
if(isRemoveAll):
shutil.rmtree(dir)
else:
os.rmdir(dir)
return True
except:
return False
'''
	Rename a file or directory
'''
@staticmethod
def reName(oldName,newName):
try:
os.rename(oldName,newName)
return True
except:
return False
'''
	Read a file line by line
'''
@staticmethod
def readFileLines(path):
try:
f=open(path,'r',encoding='utf-8')
list_lines=f.readlines();
for line in list_lines:
line=line.rstrip();
return ''.join(list_lines);
except:
return '';
@staticmethod
def writeFileBytes(path,content):
with open(path, 'w',encoding='utf8') as f:
f.write(content)
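# Hedged usage sketch (not part of the original module); the paths are illustrative:
# if FileUtil.mkdir('logs/archive', True):
#     FileUtil.writeFileBytes('logs/archive/notes.txt', 'hello')
#     text = FileUtil.readFileLines('logs/archive/notes.txt')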
| 17.571429 | 50 | 0.472706 | import os
import shutil
from utils.commonUtil import CommonUtil
class FileUtil:
@staticmethod
def cleanPath(path):
path=path.strip('\\');
return path
@staticmethod
def isExists(path):
return os.path.exists(path)
@staticmethod
def mkdir(dir,isMany):
dir=''.join(dir);
CommonUtil.toString(dir)
dir=FileUtil.cleanPath(dir)
if(FileUtil.isExists(dir)):
return False
else:
try:
if(isMany==True):
os.makedirs(dir)
else:
os.mkdir(dir)
except:
return False
return True
@staticmethod
def removeDir(dir,isRemoveAll):
try:
if(isRemoveAll):
shutil.rmtree(dir)
else:
os.rmdir(dir)
return True
except:
return False
@staticmethod
def reName(oldName,newName):
try:
os.rename(oldName,newName)
return True
except:
return False
@staticmethod
def readFileLines(path):
try:
f=open(path,'r',encoding='utf-8')
list_lines=f.readlines();
for line in list_lines:
line=line.rstrip();
return ''.join(list_lines);
except:
return '';
@staticmethod
def writeFileBytes(path,content):
with open(path, 'w',encoding='utf8') as f:
f.write(content)
| true | true |
f71f3b9ec48575a943a9e175d9ac2120c33e738d | 23,715 | py | Python | c7n/schema.py | kentnsw/cloud-custodian | fb177d6c4775c8d39459e709cd4084b867d67e5f | [
"Apache-2.0"
] | 1 | 2022-02-16T07:00:33.000Z | 2022-02-16T07:00:33.000Z | c7n/schema.py | kentnsw/cloud-custodian | fb177d6c4775c8d39459e709cd4084b867d67e5f | [
"Apache-2.0"
] | null | null | null | c7n/schema.py | kentnsw/cloud-custodian | fb177d6c4775c8d39459e709cd4084b867d67e5f | [
"Apache-2.0"
] | 2 | 2022-02-16T07:00:36.000Z | 2022-03-02T00:37:26.000Z | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
"""
Jsonschema validation of cloud custodian config.
We start with a walkthrough of the various class registries
of resource types and assemble and generate the schema.
We do some specialization to reduce overall schema size
via reference usage, although in some cases we prefer
copies, due to issues with inheritance via reference (
allowedProperties and enum extension).
All filters and actions are annotated with schema typically using
the utils.type_schema function.
"""
from collections import Counter
import json
import inspect
import logging
from jsonschema import Draft7Validator as JsonSchemaValidator
from jsonschema.exceptions import best_match
from c7n.policy import execution
from c7n.provider import clouds
from c7n.query import sources
from c7n.resources import load_available
from c7n.resolver import ValuesFrom
from c7n.filters.core import (
ValueFilter,
EventFilter,
AgeFilter,
ReduceFilter,
OPERATORS,
VALUE_TYPES,
)
from c7n.structure import StructureParser # noqa
def validate(data, schema=None, resource_types=()):
if schema is None:
schema = generate(resource_types)
JsonSchemaValidator.check_schema(schema)
validator = JsonSchemaValidator(schema)
errors = list(validator.iter_errors(data))
if not errors:
return check_unique(data) or []
try:
resp = policy_error_scope(specific_error(errors[0]), data)
name = isinstance(
errors[0].instance,
dict) and errors[0].instance.get(
'name',
'unknown') or 'unknown'
return [resp, name]
except Exception:
logging.exception(
"specific_error failed, traceback, followed by fallback")
return list(filter(None, [
errors[0],
best_match(validator.iter_errors(data)),
]))
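# Hedged usage sketch (not part of the original module); it assumes providers have
# been registered (e.g. via c7n.resources.load_available()) so generate() can build
# a schema, and uses a deliberately minimal policy document:
# errors = validate({'policies': [{'name': 'ec2-all', 'resource': 'aws.ec2'}]})
# if errors:
#     raise errors[0]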
def check_unique(data):
counter = Counter([p['name'] for p in data.get('policies', [])])
for k, v in list(counter.items()):
if v == 1:
counter.pop(k)
if counter:
return [ValueError(
"Only one policy with a given name allowed, duplicates: {}".format(counter)),
list(counter.keys())[0]]
def policy_error_scope(error, data):
"""Scope a schema error to its policy name and resource."""
err_path = list(error.absolute_path)
if err_path[0] != 'policies':
return error
pdata = data['policies'][err_path[1]]
pdata.get('name', 'unknown')
error.message = "Error on policy:{} resource:{}\n".format(
pdata.get('name', 'unknown'), pdata.get('resource', 'unknown')) + error.message
return error
def specific_error(error):
"""Try to find the best error for humans to resolve
The jsonschema.exceptions.best_match error is based purely on a
    mix of a strong match (i.e. not anyOf, oneOf) and schema depth,
    which often yields odd results that are semantically confusing.
    Instead we can use a bit of structural knowledge of the schema to
provide better results.
"""
if error.validator not in ('anyOf', 'oneOf'):
return error
r = t = None
if isinstance(error.instance, dict):
t = error.instance.get('type')
r = error.instance.get('resource')
if r is not None:
found = None
for idx, v in enumerate(error.validator_value):
if '$ref' in v and v['$ref'].rsplit('/', 2)[1].endswith(r):
found = idx
break
if found is not None:
# error context is a flat list of all validation
# failures, we have to index back to the policy
# of interest.
for e in error.context:
# resource policies have a fixed path from
# the top of the schema
if e.absolute_schema_path[4] == found:
return specific_error(e)
return specific_error(error.context[idx])
if t is not None:
found = None
for idx, v in enumerate(error.validator_value):
if ('$ref' in v and
v['$ref'].rsplit('/', 2)[-1].rsplit('.', 1)[-1] == t):
found = idx
break
elif 'type' in v and t in v['properties']['type']['enum']:
found = idx
break
if found is not None:
for e in error.context:
for el in reversed(e.absolute_schema_path):
if isinstance(el, int):
if el == found:
return e
break
return error
def generate(resource_types=()):
resource_defs = {}
definitions = {
'resources': resource_defs,
'string_dict': {
"type": "object",
"patternProperties": {
"": {"type": "string"},
},
},
'basic_dict': {
"type": "object",
"patternProperties": {
"": {
'oneOf': [
{"type": "string"},
{"type": "boolean"},
{"type": "number"},
],
}
},
},
'iam-statement': {
'additionalProperties': False,
'type': 'object',
'properties': {
'Sid': {'type': 'string'},
'Effect': {'type': 'string', 'enum': ['Allow', 'Deny']},
'Principal': {'anyOf': [
{'type': 'string'},
{'type': 'object'}, {'type': 'array'}]},
'NotPrincipal': {'anyOf': [{'type': 'object'}, {'type': 'array'}]},
'Action': {'anyOf': [{'type': 'string'}, {'type': 'array'}]},
'NotAction': {'anyOf': [{'type': 'string'}, {'type': 'array'}]},
'Resource': {'anyOf': [{'type': 'string'}, {'type': 'array'}]},
'NotResource': {'anyOf': [{'type': 'string'}, {'type': 'array'}]},
'Condition': {'type': 'object'}
},
'required': ['Sid', 'Effect'],
'oneOf': [
{'required': ['Principal', 'Action', 'Resource']},
{'required': ['NotPrincipal', 'Action', 'Resource']},
{'required': ['Principal', 'NotAction', 'Resource']},
{'required': ['NotPrincipal', 'NotAction', 'Resource']},
{'required': ['Principal', 'Action', 'NotResource']},
{'required': ['NotPrincipal', 'Action', 'NotResource']},
{'required': ['Principal', 'NotAction', 'NotResource']},
{'required': ['NotPrincipal', 'NotAction', 'NotResource']}
]
},
'actions': {},
'filters': {
'value': ValueFilter.schema,
'event': EventFilter.schema,
'age': AgeFilter.schema,
'reduce': ReduceFilter.schema,
# Shortcut form of value filter as k=v
'valuekv': {
'type': 'object',
'additionalProperties': {'oneOf': [{'type': 'number'}, {'type': 'null'},
{'type': 'array', 'maxItems': 0}, {'type': 'string'}, {'type': 'boolean'}]},
'minProperties': 1,
'maxProperties': 1},
},
'filters_common': {
'comparison_operators': {
'enum': list(OPERATORS.keys())},
'value_types': {'enum': VALUE_TYPES},
'value_from': ValuesFrom.schema,
'value': {'oneOf': [
{'type': 'array'},
{'type': 'string'},
{'type': 'boolean'},
{'type': 'number'},
{'type': 'null'}]},
},
'policy': {
'type': 'object',
'required': ['name', 'resource'],
'additionalProperties': False,
'properties': {
'name': {
'type': 'string',
'pattern': "^[A-z][A-z0-9]*(-*[A-z0-9]+)*$"},
'conditions': {
'type': 'array',
'items': {'anyOf': [
{'type': 'object', 'additionalProperties': False,
'properties': {'or': {
'$ref': '#/definitions/policy/properties/conditions'}}},
{'type': 'object', 'additionalProperties': False,
'properties': {'not': {
'$ref': '#/definitions/policy/properties/conditions'}}},
{'type': 'object', 'additionalProperties': False,
'properties': {'and': {
'$ref': '#/definitions/policy/properties/conditions'}}},
{'$ref': '#/definitions/filters/value'},
{'$ref': '#/definitions/filters/event'},
{'$ref': '#/definitions/filters/valuekv'}]}},
# these should be deprecated for conditions
'region': {'type': 'string'},
'tz': {'type': 'string'},
'start': {'format': 'date-time'},
'end': {'format': 'date-time'},
'resource': {'type': 'string'},
'max-resources': {'anyOf': [
{'type': 'integer', 'minimum': 1},
{'$ref': '#/definitions/max-resources-properties'}
]},
'max-resources-percent': {'type': 'number', 'minimum': 0, 'maximum': 100},
'comment': {'type': 'string'},
'title': {'type': 'string'},
'description': {'type': 'string'},
'tags': {'type': 'array', 'items': {'type': 'string'}},
'metadata': {'type': 'object'},
'mode': {'$ref': '#/definitions/policy-mode'},
'source': {'enum': list(sources.keys())},
'actions': {
'type': 'array',
},
'filters': {
'type': 'array'
},
'metrics': {
'type': 'array'
},
#
# TODO: source queries should really move under
# source. This was initially used for describe sources
# to expose server side query mechanisms, however its
# important to note it also prevents resource cache
# utilization between policies that have different
# queries.
'query': {
'type': 'array', 'items': {'type': 'object'}}
},
},
'policy-mode': {
'anyOf': [e.schema for _, e in execution.items()],
},
'max-resources-properties': {
'type': 'object',
'additionalProperties': False,
'properties': {
'amount': {"type": 'integer', 'minimum': 1},
'op': {'enum': ['or', 'and']},
'percent': {'type': 'number', 'minimum': 0, 'maximum': 100}
}
}
}
resource_refs = []
for cloud_name, cloud_type in sorted(clouds.items()):
for type_name, resource_type in sorted(cloud_type.resources.items()):
r_type_name = "%s.%s" % (cloud_name, type_name)
if resource_types and r_type_name not in resource_types:
if not resource_type.type_aliases:
continue
elif not {"%s.%s" % (cloud_name, ralias) for ralias
in resource_type.type_aliases}.intersection(
resource_types):
continue
aliases = []
if resource_type.type_aliases:
aliases.extend(["%s.%s" % (cloud_name, a) for a in resource_type.type_aliases])
# aws gets legacy aliases with no cloud prefix
if cloud_name == 'aws':
aliases.extend(resource_type.type_aliases)
# aws gets additional alias for default name
if cloud_name == 'aws':
aliases.append(type_name)
resource_refs.append(
process_resource(
r_type_name,
resource_type,
resource_defs,
aliases,
definitions,
cloud_name
))
schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
'id': 'http://schema.cloudcustodian.io/v0/custodian.json',
'definitions': definitions,
'type': 'object',
'required': ['policies'],
'additionalProperties': False,
'properties': {
'vars': {'type': 'object'},
'policies': {
'type': 'array',
'additionalItems': False,
'items': {'anyOf': resource_refs}
}
}
}
# allow empty policies with lazy load
if not resource_refs:
schema['properties']['policies']['items'] = {'type': 'object'}
return schema
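# Hedged usage sketch (not part of the original module): restricting generate() to a
# single resource type keeps the emitted schema small; 'aws.ec2' is an assumption
# about what is registered in this environment:
# small_schema = generate(resource_types=('aws.ec2',))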
def process_resource(
type_name, resource_type, resource_defs, aliases=None,
definitions=None, provider_name=None):
r = resource_defs.setdefault(type_name, {'actions': {}, 'filters': {}})
action_refs = []
for a in ElementSchema.elements(resource_type.action_registry):
action_name = a.type
if a.schema_alias:
action_alias = "%s.%s" % (provider_name, action_name)
if action_alias in definitions['actions']:
if definitions['actions'][action_alias] != a.schema: # NOQA
msg = "Schema mismatch on type:{} action:{} w/ schema alias ".format(
type_name, action_name)
raise SyntaxError(msg)
else:
definitions['actions'][action_alias] = a.schema
action_refs.append({'$ref': '#/definitions/actions/%s' % action_alias})
else:
r['actions'][action_name] = a.schema
action_refs.append(
{'$ref': '#/definitions/resources/%s/actions/%s' % (
type_name, action_name)})
# one word action shortcuts
action_refs.append(
{'enum': list(resource_type.action_registry.keys())})
filter_refs = []
for f in ElementSchema.elements(resource_type.filter_registry):
filter_name = f.type
if filter_name == 'value':
filter_refs.append({'$ref': '#/definitions/filters/value'})
filter_refs.append({'$ref': '#/definitions/filters/valuekv'})
elif filter_name == 'event':
filter_refs.append({'$ref': '#/definitions/filters/event'})
elif f.schema_alias:
filter_alias = "%s.%s" % (provider_name, filter_name)
if filter_alias in definitions['filters']:
assert definitions['filters'][filter_alias] == f.schema, "Schema mismatch on filter w/ schema alias" # NOQA
else:
definitions['filters'][filter_alias] = f.schema
filter_refs.append({'$ref': '#/definitions/filters/%s' % filter_alias})
continue
else:
r['filters'][filter_name] = f.schema
filter_refs.append(
{'$ref': '#/definitions/resources/%s/filters/%s' % (
type_name, filter_name)})
# one word filter shortcuts
filter_refs.append(
{'enum': list(resource_type.filter_registry.keys())})
block_fref = '#/definitions/resources/%s/policy/allOf/1/properties/filters' % (
type_name)
filter_refs.extend([
{'type': 'object', 'additionalProperties': False,
'properties': {'or': {'$ref': block_fref}}},
{'type': 'object', 'additionalProperties': False,
'properties': {'and': {'$ref': block_fref}}},
{'type': 'object', 'additionalProperties': False,
'properties': {'not': {'$ref': block_fref}}}])
resource_policy = {
'allOf': [
{'$ref': '#/definitions/policy'},
{'properties': {
'resource': {'enum': [type_name]},
'filters': {
'type': 'array',
'items': {'anyOf': filter_refs}},
'actions': {
'type': 'array',
'items': {'anyOf': action_refs}}}},
]
}
if aliases:
resource_policy['allOf'][1]['properties'][
'resource']['enum'].extend(aliases)
if type_name == 'ec2':
resource_policy['allOf'][1]['properties']['query'] = {}
r['policy'] = resource_policy
return {'$ref': '#/definitions/resources/%s/policy' % type_name}
def resource_outline(provider=None):
outline = {}
for cname, ctype in sorted(clouds.items()):
if provider and provider != cname:
continue
cresources = outline[cname] = {}
for rname, rtype in sorted(ctype.resources.items()):
cresources['%s.%s' % (cname, rname)] = rinfo = {}
rinfo['filters'] = sorted(rtype.filter_registry.keys())
rinfo['actions'] = sorted(rtype.action_registry.keys())
return outline
def resource_vocabulary(cloud_name=None, qualify_name=True, aliases=True):
vocabulary = {}
resources = {}
if aliases:
vocabulary['aliases'] = {}
for cname, ctype in clouds.items():
if cloud_name is not None and cloud_name != cname:
continue
for rname, rtype in ctype.resources.items():
if qualify_name:
resources['%s.%s' % (cname, rname)] = rtype
else:
resources[rname] = rtype
for type_name, resource_type in resources.items():
classes = {'actions': {}, 'filters': {}, 'resource': resource_type}
actions = []
for cls in ElementSchema.elements(resource_type.action_registry):
action_name = ElementSchema.name(cls)
actions.append(action_name)
classes['actions'][action_name] = cls
filters = []
for cls in ElementSchema.elements(resource_type.filter_registry):
filter_name = ElementSchema.name(cls)
filters.append(filter_name)
classes['filters'][filter_name] = cls
vocabulary[type_name] = {
'filters': sorted(filters),
'actions': sorted(actions),
'classes': classes,
}
if aliases and resource_type.type_aliases:
provider = type_name.split('.', 1)[0]
for type_alias in resource_type.type_aliases:
vocabulary['aliases'][
"{}.{}".format(provider, type_alias)] = vocabulary[type_name]
if provider == 'aws':
vocabulary['aliases'][type_alias] = vocabulary[type_name]
vocabulary[type_name]['resource_type'] = type_name
vocabulary["mode"] = {}
for mode_name, cls in execution.items():
vocabulary["mode"][mode_name] = cls
return vocabulary
class ElementSchema:
"""Utility functions for working with resource's filters and actions.
"""
@staticmethod
def elements(registry):
"""Given a resource registry return sorted de-aliased values.
"""
seen = {}
for k, v in registry.items():
if k in ('and', 'or', 'not'):
continue
if v in seen:
continue
else:
seen[ElementSchema.name(v)] = v
return [seen[k] for k in sorted(seen)]
@staticmethod
def resolve(vocabulary, schema_path):
"""Given a resource vocabulary and a dotted path, resolve an element.
"""
current = vocabulary
frag = None
if schema_path.startswith('.'):
            # The prepended '.' is an odd artifact
schema_path = schema_path[1:]
parts = schema_path.split('.')
while parts:
k = parts.pop(0)
if frag:
k = "%s.%s" % (frag, k)
frag = None
parts.insert(0, 'classes')
elif k in clouds:
frag = k
if len(parts) == 1:
parts.append('resource')
continue
if k not in current:
raise ValueError("Invalid schema path %s" % schema_path)
current = current[k]
return current
@staticmethod
def name(cls):
"""For a filter or action return its name."""
return cls.schema['properties']['type']['enum'][0]
@staticmethod
def doc(cls):
"""Return 'best' formatted doc string for a given class.
Walks up class hierarchy, skipping known bad. Returns
empty string if no suitable doc string found.
"""
# walk up class hierarchy for nearest
# good doc string, skip known
if cls.__doc__ is not None:
return inspect.cleandoc(cls.__doc__)
doc = None
for b in cls.__bases__:
if b in (ValueFilter, object):
continue
doc = b.__doc__ or ElementSchema.doc(b)
if doc is not None:
return inspect.cleandoc(doc)
return ""
@staticmethod
def schema(definitions, cls):
"""Return a pretty'ified version of an element schema."""
schema = isinstance(cls, type) and dict(cls.schema) or dict(cls)
schema.pop('type', None)
schema.pop('additionalProperties', None)
return ElementSchema._expand_schema(schema, definitions)
@staticmethod
def _expand_schema(schema, definitions):
"""Expand references in schema to their full schema"""
for k, v in list(schema.items()):
if k == '$ref':
# the value here is in the form of: '#/definitions/path/to/key'
parts = v.split('/')
if ['#', 'definitions'] != parts[0:2]:
raise ValueError("Invalid Ref %s" % v)
current = definitions
for p in parts[2:]:
if p not in current:
return None
current = current[p]
return ElementSchema._expand_schema(current, definitions)
elif isinstance(v, dict):
schema[k] = ElementSchema._expand_schema(v, definitions)
return schema
def pprint_schema_summary(vocabulary):
providers = {}
non_providers = {}
for type_name, rv in vocabulary.items():
if '.' not in type_name:
non_providers[type_name] = len(rv)
else:
provider, name = type_name.split('.', 1)
stats = providers.setdefault(provider, {
'resources': 0, 'actions': Counter(), 'filters': Counter()})
stats['resources'] += 1
for a in rv.get('actions'):
stats['actions'][a] += 1
for f in rv.get('filters'):
stats['filters'][f] += 1
for provider, stats in providers.items():
print("%s:" % provider)
print(" resource count: %d" % stats['resources'])
print(" actions: %d" % len(stats['actions']))
print(" filters: %d" % len(stats['filters']))
for non_providers_type, length in non_providers.items():
print("%s:" % non_providers_type)
print(" count: %d" % length)
def json_dump(resource=None):
load_available()
print(json.dumps(generate(resource), indent=2))
if __name__ == '__main__':
json_dump()
| 36.824534 | 123 | 0.518954 |
from collections import Counter
import json
import inspect
import logging
from jsonschema import Draft7Validator as JsonSchemaValidator
from jsonschema.exceptions import best_match
from c7n.policy import execution
from c7n.provider import clouds
from c7n.query import sources
from c7n.resources import load_available
from c7n.resolver import ValuesFrom
from c7n.filters.core import (
ValueFilter,
EventFilter,
AgeFilter,
ReduceFilter,
OPERATORS,
VALUE_TYPES,
)
from c7n.structure import StructureParser
def validate(data, schema=None, resource_types=()):
if schema is None:
schema = generate(resource_types)
JsonSchemaValidator.check_schema(schema)
validator = JsonSchemaValidator(schema)
errors = list(validator.iter_errors(data))
if not errors:
return check_unique(data) or []
try:
resp = policy_error_scope(specific_error(errors[0]), data)
name = isinstance(
errors[0].instance,
dict) and errors[0].instance.get(
'name',
'unknown') or 'unknown'
return [resp, name]
except Exception:
logging.exception(
"specific_error failed, traceback, followed by fallback")
return list(filter(None, [
errors[0],
best_match(validator.iter_errors(data)),
]))
def check_unique(data):
counter = Counter([p['name'] for p in data.get('policies', [])])
for k, v in list(counter.items()):
if v == 1:
counter.pop(k)
if counter:
return [ValueError(
"Only one policy with a given name allowed, duplicates: {}".format(counter)),
list(counter.keys())[0]]
def policy_error_scope(error, data):
err_path = list(error.absolute_path)
if err_path[0] != 'policies':
return error
pdata = data['policies'][err_path[1]]
pdata.get('name', 'unknown')
error.message = "Error on policy:{} resource:{}\n".format(
pdata.get('name', 'unknown'), pdata.get('resource', 'unknown')) + error.message
return error
def specific_error(error):
if error.validator not in ('anyOf', 'oneOf'):
return error
r = t = None
if isinstance(error.instance, dict):
t = error.instance.get('type')
r = error.instance.get('resource')
if r is not None:
found = None
for idx, v in enumerate(error.validator_value):
if '$ref' in v and v['$ref'].rsplit('/', 2)[1].endswith(r):
found = idx
break
if found is not None:
for e in error.context:
if e.absolute_schema_path[4] == found:
return specific_error(e)
return specific_error(error.context[idx])
if t is not None:
found = None
for idx, v in enumerate(error.validator_value):
if ('$ref' in v and
v['$ref'].rsplit('/', 2)[-1].rsplit('.', 1)[-1] == t):
found = idx
break
elif 'type' in v and t in v['properties']['type']['enum']:
found = idx
break
if found is not None:
for e in error.context:
for el in reversed(e.absolute_schema_path):
if isinstance(el, int):
if el == found:
return e
break
return error
def generate(resource_types=()):
resource_defs = {}
definitions = {
'resources': resource_defs,
'string_dict': {
"type": "object",
"patternProperties": {
"": {"type": "string"},
},
},
'basic_dict': {
"type": "object",
"patternProperties": {
"": {
'oneOf': [
{"type": "string"},
{"type": "boolean"},
{"type": "number"},
],
}
},
},
'iam-statement': {
'additionalProperties': False,
'type': 'object',
'properties': {
'Sid': {'type': 'string'},
'Effect': {'type': 'string', 'enum': ['Allow', 'Deny']},
'Principal': {'anyOf': [
{'type': 'string'},
{'type': 'object'}, {'type': 'array'}]},
'NotPrincipal': {'anyOf': [{'type': 'object'}, {'type': 'array'}]},
'Action': {'anyOf': [{'type': 'string'}, {'type': 'array'}]},
'NotAction': {'anyOf': [{'type': 'string'}, {'type': 'array'}]},
'Resource': {'anyOf': [{'type': 'string'}, {'type': 'array'}]},
'NotResource': {'anyOf': [{'type': 'string'}, {'type': 'array'}]},
'Condition': {'type': 'object'}
},
'required': ['Sid', 'Effect'],
'oneOf': [
{'required': ['Principal', 'Action', 'Resource']},
{'required': ['NotPrincipal', 'Action', 'Resource']},
{'required': ['Principal', 'NotAction', 'Resource']},
{'required': ['NotPrincipal', 'NotAction', 'Resource']},
{'required': ['Principal', 'Action', 'NotResource']},
{'required': ['NotPrincipal', 'Action', 'NotResource']},
{'required': ['Principal', 'NotAction', 'NotResource']},
{'required': ['NotPrincipal', 'NotAction', 'NotResource']}
]
},
'actions': {},
'filters': {
'value': ValueFilter.schema,
'event': EventFilter.schema,
'age': AgeFilter.schema,
'reduce': ReduceFilter.schema,
'valuekv': {
'type': 'object',
'additionalProperties': {'oneOf': [{'type': 'number'}, {'type': 'null'},
{'type': 'array', 'maxItems': 0}, {'type': 'string'}, {'type': 'boolean'}]},
'minProperties': 1,
'maxProperties': 1},
},
'filters_common': {
'comparison_operators': {
'enum': list(OPERATORS.keys())},
'value_types': {'enum': VALUE_TYPES},
'value_from': ValuesFrom.schema,
'value': {'oneOf': [
{'type': 'array'},
{'type': 'string'},
{'type': 'boolean'},
{'type': 'number'},
{'type': 'null'}]},
},
'policy': {
'type': 'object',
'required': ['name', 'resource'],
'additionalProperties': False,
'properties': {
'name': {
'type': 'string',
'pattern': "^[A-z][A-z0-9]*(-*[A-z0-9]+)*$"},
'conditions': {
'type': 'array',
'items': {'anyOf': [
{'type': 'object', 'additionalProperties': False,
'properties': {'or': {
'$ref': '#/definitions/policy/properties/conditions'}}},
{'type': 'object', 'additionalProperties': False,
'properties': {'not': {
'$ref': '#/definitions/policy/properties/conditions'}}},
{'type': 'object', 'additionalProperties': False,
'properties': {'and': {
'$ref': '#/definitions/policy/properties/conditions'}}},
{'$ref': '#/definitions/filters/value'},
{'$ref': '#/definitions/filters/event'},
{'$ref': '#/definitions/filters/valuekv'}]}},
'region': {'type': 'string'},
'tz': {'type': 'string'},
'start': {'format': 'date-time'},
'end': {'format': 'date-time'},
'resource': {'type': 'string'},
'max-resources': {'anyOf': [
{'type': 'integer', 'minimum': 1},
{'$ref': '#/definitions/max-resources-properties'}
]},
'max-resources-percent': {'type': 'number', 'minimum': 0, 'maximum': 100},
'comment': {'type': 'string'},
'title': {'type': 'string'},
'description': {'type': 'string'},
'tags': {'type': 'array', 'items': {'type': 'string'}},
'metadata': {'type': 'object'},
'mode': {'$ref': '#/definitions/policy-mode'},
'source': {'enum': list(sources.keys())},
'actions': {
'type': 'array',
},
'filters': {
'type': 'array'
},
'metrics': {
'type': 'array'
},
'query': {
'type': 'array', 'items': {'type': 'object'}}
},
},
'policy-mode': {
'anyOf': [e.schema for _, e in execution.items()],
},
'max-resources-properties': {
'type': 'object',
'additionalProperties': False,
'properties': {
'amount': {"type": 'integer', 'minimum': 1},
'op': {'enum': ['or', 'and']},
'percent': {'type': 'number', 'minimum': 0, 'maximum': 100}
}
}
}
resource_refs = []
for cloud_name, cloud_type in sorted(clouds.items()):
for type_name, resource_type in sorted(cloud_type.resources.items()):
r_type_name = "%s.%s" % (cloud_name, type_name)
if resource_types and r_type_name not in resource_types:
if not resource_type.type_aliases:
continue
elif not {"%s.%s" % (cloud_name, ralias) for ralias
in resource_type.type_aliases}.intersection(
resource_types):
continue
aliases = []
if resource_type.type_aliases:
aliases.extend(["%s.%s" % (cloud_name, a) for a in resource_type.type_aliases])
if cloud_name == 'aws':
aliases.extend(resource_type.type_aliases)
if cloud_name == 'aws':
aliases.append(type_name)
resource_refs.append(
process_resource(
r_type_name,
resource_type,
resource_defs,
aliases,
definitions,
cloud_name
))
schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
'id': 'http://schema.cloudcustodian.io/v0/custodian.json',
'definitions': definitions,
'type': 'object',
'required': ['policies'],
'additionalProperties': False,
'properties': {
'vars': {'type': 'object'},
'policies': {
'type': 'array',
'additionalItems': False,
'items': {'anyOf': resource_refs}
}
}
}
if not resource_refs:
schema['properties']['policies']['items'] = {'type': 'object'}
return schema
def process_resource(
type_name, resource_type, resource_defs, aliases=None,
definitions=None, provider_name=None):
r = resource_defs.setdefault(type_name, {'actions': {}, 'filters': {}})
action_refs = []
for a in ElementSchema.elements(resource_type.action_registry):
action_name = a.type
if a.schema_alias:
action_alias = "%s.%s" % (provider_name, action_name)
if action_alias in definitions['actions']:
if definitions['actions'][action_alias] != a.schema:
msg = "Schema mismatch on type:{} action:{} w/ schema alias ".format(
type_name, action_name)
raise SyntaxError(msg)
else:
definitions['actions'][action_alias] = a.schema
action_refs.append({'$ref': '#/definitions/actions/%s' % action_alias})
else:
r['actions'][action_name] = a.schema
action_refs.append(
{'$ref': '#/definitions/resources/%s/actions/%s' % (
type_name, action_name)})
action_refs.append(
{'enum': list(resource_type.action_registry.keys())})
filter_refs = []
for f in ElementSchema.elements(resource_type.filter_registry):
filter_name = f.type
if filter_name == 'value':
filter_refs.append({'$ref': '#/definitions/filters/value'})
filter_refs.append({'$ref': '#/definitions/filters/valuekv'})
elif filter_name == 'event':
filter_refs.append({'$ref': '#/definitions/filters/event'})
elif f.schema_alias:
filter_alias = "%s.%s" % (provider_name, filter_name)
if filter_alias in definitions['filters']:
assert definitions['filters'][filter_alias] == f.schema, "Schema mismatch on filter w/ schema alias"
else:
definitions['filters'][filter_alias] = f.schema
filter_refs.append({'$ref': '#/definitions/filters/%s' % filter_alias})
continue
else:
r['filters'][filter_name] = f.schema
filter_refs.append(
{'$ref': '#/definitions/resources/%s/filters/%s' % (
type_name, filter_name)})
filter_refs.append(
{'enum': list(resource_type.filter_registry.keys())})
block_fref = '#/definitions/resources/%s/policy/allOf/1/properties/filters' % (
type_name)
filter_refs.extend([
{'type': 'object', 'additionalProperties': False,
'properties': {'or': {'$ref': block_fref}}},
{'type': 'object', 'additionalProperties': False,
'properties': {'and': {'$ref': block_fref}}},
{'type': 'object', 'additionalProperties': False,
'properties': {'not': {'$ref': block_fref}}}])
resource_policy = {
'allOf': [
{'$ref': '#/definitions/policy'},
{'properties': {
'resource': {'enum': [type_name]},
'filters': {
'type': 'array',
'items': {'anyOf': filter_refs}},
'actions': {
'type': 'array',
'items': {'anyOf': action_refs}}}},
]
}
if aliases:
resource_policy['allOf'][1]['properties'][
'resource']['enum'].extend(aliases)
if type_name == 'ec2':
resource_policy['allOf'][1]['properties']['query'] = {}
r['policy'] = resource_policy
return {'$ref': '#/definitions/resources/%s/policy' % type_name}
def resource_outline(provider=None):
outline = {}
for cname, ctype in sorted(clouds.items()):
if provider and provider != cname:
continue
cresources = outline[cname] = {}
for rname, rtype in sorted(ctype.resources.items()):
cresources['%s.%s' % (cname, rname)] = rinfo = {}
rinfo['filters'] = sorted(rtype.filter_registry.keys())
rinfo['actions'] = sorted(rtype.action_registry.keys())
return outline
def resource_vocabulary(cloud_name=None, qualify_name=True, aliases=True):
vocabulary = {}
resources = {}
if aliases:
vocabulary['aliases'] = {}
for cname, ctype in clouds.items():
if cloud_name is not None and cloud_name != cname:
continue
for rname, rtype in ctype.resources.items():
if qualify_name:
resources['%s.%s' % (cname, rname)] = rtype
else:
resources[rname] = rtype
for type_name, resource_type in resources.items():
classes = {'actions': {}, 'filters': {}, 'resource': resource_type}
actions = []
for cls in ElementSchema.elements(resource_type.action_registry):
action_name = ElementSchema.name(cls)
actions.append(action_name)
classes['actions'][action_name] = cls
filters = []
for cls in ElementSchema.elements(resource_type.filter_registry):
filter_name = ElementSchema.name(cls)
filters.append(filter_name)
classes['filters'][filter_name] = cls
vocabulary[type_name] = {
'filters': sorted(filters),
'actions': sorted(actions),
'classes': classes,
}
if aliases and resource_type.type_aliases:
provider = type_name.split('.', 1)[0]
for type_alias in resource_type.type_aliases:
vocabulary['aliases'][
"{}.{}".format(provider, type_alias)] = vocabulary[type_name]
if provider == 'aws':
vocabulary['aliases'][type_alias] = vocabulary[type_name]
vocabulary[type_name]['resource_type'] = type_name
vocabulary["mode"] = {}
for mode_name, cls in execution.items():
vocabulary["mode"][mode_name] = cls
return vocabulary
class ElementSchema:
@staticmethod
def elements(registry):
seen = {}
for k, v in registry.items():
if k in ('and', 'or', 'not'):
continue
if v in seen:
continue
else:
seen[ElementSchema.name(v)] = v
return [seen[k] for k in sorted(seen)]
@staticmethod
def resolve(vocabulary, schema_path):
current = vocabulary
frag = None
if schema_path.startswith('.'):
schema_path = schema_path[1:]
parts = schema_path.split('.')
while parts:
k = parts.pop(0)
if frag:
k = "%s.%s" % (frag, k)
frag = None
parts.insert(0, 'classes')
elif k in clouds:
frag = k
if len(parts) == 1:
parts.append('resource')
continue
if k not in current:
raise ValueError("Invalid schema path %s" % schema_path)
current = current[k]
return current
@staticmethod
def name(cls):
return cls.schema['properties']['type']['enum'][0]
@staticmethod
def doc(cls):
if cls.__doc__ is not None:
return inspect.cleandoc(cls.__doc__)
doc = None
for b in cls.__bases__:
if b in (ValueFilter, object):
continue
doc = b.__doc__ or ElementSchema.doc(b)
if doc is not None:
return inspect.cleandoc(doc)
return ""
@staticmethod
def schema(definitions, cls):
schema = isinstance(cls, type) and dict(cls.schema) or dict(cls)
schema.pop('type', None)
schema.pop('additionalProperties', None)
return ElementSchema._expand_schema(schema, definitions)
@staticmethod
def _expand_schema(schema, definitions):
for k, v in list(schema.items()):
if k == '$ref':
parts = v.split('/')
if ['#', 'definitions'] != parts[0:2]:
raise ValueError("Invalid Ref %s" % v)
current = definitions
for p in parts[2:]:
if p not in current:
return None
current = current[p]
return ElementSchema._expand_schema(current, definitions)
elif isinstance(v, dict):
schema[k] = ElementSchema._expand_schema(v, definitions)
return schema
def pprint_schema_summary(vocabulary):
providers = {}
non_providers = {}
for type_name, rv in vocabulary.items():
if '.' not in type_name:
non_providers[type_name] = len(rv)
else:
provider, name = type_name.split('.', 1)
stats = providers.setdefault(provider, {
'resources': 0, 'actions': Counter(), 'filters': Counter()})
stats['resources'] += 1
for a in rv.get('actions'):
stats['actions'][a] += 1
for f in rv.get('filters'):
stats['filters'][f] += 1
for provider, stats in providers.items():
print("%s:" % provider)
print(" resource count: %d" % stats['resources'])
print(" actions: %d" % len(stats['actions']))
print(" filters: %d" % len(stats['filters']))
for non_providers_type, length in non_providers.items():
print("%s:" % non_providers_type)
print(" count: %d" % length)
def json_dump(resource=None):
load_available()
print(json.dumps(generate(resource), indent=2))
if __name__ == '__main__':
json_dump()
| true | true |
f71f3bbc71d7c2bbbd154160ce6aa8cd9c6c6522 | 389 | py | Python | networkapi/usuario/urls_authenticate.py | vinicius-marinho/GloboNetworkAPI | 94651d3b4dd180769bc40ec966814f3427ccfb5b | [
"Apache-2.0"
] | 73 | 2015-04-13T17:56:11.000Z | 2022-03-24T06:13:07.000Z | networkapi/usuario/urls_authenticate.py | leopoldomauricio/GloboNetworkAPI | 3b5b2e336d9eb53b2c113977bfe466b23a50aa29 | [
"Apache-2.0"
] | 99 | 2015-04-03T01:04:46.000Z | 2021-10-03T23:24:48.000Z | networkapi/usuario/urls_authenticate.py | shildenbrand/GloboNetworkAPI | 515d5e961456cee657c08c275faa1b69b7452719 | [
"Apache-2.0"
] | 64 | 2015-08-05T21:26:29.000Z | 2022-03-22T01:06:28.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.conf.urls import patterns
from django.conf.urls import url
from networkapi.usuario.resource.AuthenticateResource import AuthenticateResource
authenticate_resource = AuthenticateResource()
urlpatterns = patterns(
'',
url(r'^$', authenticate_resource.handle_request,
name='user.authenticate'),
)
| 24.3125 | 81 | 0.766067 |
from __future__ import absolute_import
from django.conf.urls import patterns
from django.conf.urls import url
from networkapi.usuario.resource.AuthenticateResource import AuthenticateResource
authenticate_resource = AuthenticateResource()
urlpatterns = patterns(
'',
url(r'^$', authenticate_resource.handle_request,
name='user.authenticate'),
)
| true | true |
f71f3cd355dbf084d2bb6bf0aa7899cc78fefb66 | 371 | py | Python | smallbusiness/users/urls.py | dhirensr/smallbusiness | a5628f9591f705782dc92710338c0e6d74751957 | [
"MIT"
] | 1 | 2021-03-30T20:10:00.000Z | 2021-03-30T20:10:00.000Z | smallbusiness/users/urls.py | dhirensr/smallbusiness | a5628f9591f705782dc92710338c0e6d74751957 | [
"MIT"
] | 5 | 2021-04-06T07:54:16.000Z | 2022-03-01T22:26:07.000Z | smallbusiness/users/urls.py | dhirensr/smallbusiness | a5628f9591f705782dc92710338c0e6d74751957 | [
"MIT"
] | null | null | null | from django.urls import path
from smallbusiness.users.views import (
user_detail_view,
user_redirect_view,
user_update_view,
)
app_name = "users"
urlpatterns = [
path("~redirect/", view=user_redirect_view, name="redirect"),
path("~update/", view=user_update_view, name="update"),
path("<str:username>/", view=user_detail_view, name="detail"),
]
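# Hedged usage note (not part of the original file): with app_name = "users", these
# routes are typically reversed as, e.g.,
# reverse("users:detail", kwargs={"username": some_username}).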
| 24.733333 | 66 | 0.706199 | from django.urls import path
from smallbusiness.users.views import (
user_detail_view,
user_redirect_view,
user_update_view,
)
app_name = "users"
urlpatterns = [
path("~redirect/", view=user_redirect_view, name="redirect"),
path("~update/", view=user_update_view, name="update"),
path("<str:username>/", view=user_detail_view, name="detail"),
]
| true | true |
f71f3cf0eefc642a527fe9dd1e0224c4e95c3350 | 1,841 | py | Python | NDBSCANjDE/CF3.py | krowck/ISDA-NCjDE-HJ | 44c33ba12542a88eaa39fe2b72398ffd7b439372 | [
"MIT"
] | null | null | null | NDBSCANjDE/CF3.py | krowck/ISDA-NCjDE-HJ | 44c33ba12542a88eaa39fe2b72398ffd7b439372 | [
"MIT"
] | null | null | null | NDBSCANjDE/CF3.py | krowck/ISDA-NCjDE-HJ | 44c33ba12542a88eaa39fe2b72398ffd7b439372 | [
"MIT"
] | null | null | null | ###############################################################################
# Version: 1.1
# Last modified on: 3 April, 2016
# Developers: Michael G. Epitropakis
# email: m_(DOT)_epitropakis_(AT)_lancaster_(DOT)_ac_(DOT)_uk
###############################################################################
from cfunction import *
import numpy as np
class CF3(CFunction):
def __init__(self, dim):
super(CF3, self).__init__(dim, 6)
# Initialize data for composition
self._CFunction__sigma_ = np.array( [1.0, 1.0, 2.0, 2.0, 2.0, 2.0] )
self._CFunction__bias_ = np.zeros( self._CFunction__nofunc_ )
self._CFunction__weight_ = np.zeros( self._CFunction__nofunc_ )
self._CFunction__lambda_ = np.array( [1.0/4.0, 1.0/10.0, 2.0, 1.0, 2.0, 5.0] )
# Lower/Upper Bounds
self._CFunction__lbound_ = -5.0 * np.ones( dim )
self._CFunction__ubound_ = 5.0 * np.ones( dim )
# Load optima
o = np.loadtxt('data/optima.dat')
if o.shape[1] >= dim:
self._CFunction__O_ = o[:self._CFunction__nofunc_, :dim]
else: # randomly initialize
			self._CFunction__O_ = self._CFunction__lbound_ + (self._CFunction__ubound_ - self._CFunction__lbound_) * np.random.rand(self._CFunction__nofunc_, dim)
# Load M_: Rotation matrices
if dim == 2 or dim == 3 or dim == 5 or dim == 10 or dim == 20:
fname = "data/CF3_M_D" + str(dim) + ".dat"
self._CFunction__load_rotmat(fname)
else:
# M_ Identity matrices # TODO: Generate dimension independent rotation matrices
self._CFunction__M_ = [ np.eye(dim) ] * self._CFunction__nofunc_
# Initialize functions of the composition
self._CFunction__function_ = {0:FEF8F2, 1:FEF8F2, 2:FWeierstrass, 3:FWeierstrass, 4:FGrienwank, 5:FGrienwank}
# Calculate fmaxi
self._CFunction__calculate_fmaxi()
def evaluate(self, x):
return self._CFunction__evaluate_inner_(x)
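# Hedged usage sketch (not part of the original file); it assumes the CEC'2013 data
# files (data/optima.dat, data/CF3_M_D*.dat) are present on disk:
# f = CF3(dim=2)
# value = f.evaluate(np.zeros(2))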
| 39.170213 | 157 | 0.649647 | true | true | |
f71f3cf164bfca3bb61e5d75622365d64cdd91fe | 10,363 | py | Python | crabageprediction/venv/Lib/site-packages/pandas/tests/extension/base/dim2.py | 13rianlucero/CrabAgePrediction | 92bc7fbe1040f49e820473e33cc3902a5a7177c7 | [
"MIT"
] | 3 | 2021-11-23T05:35:28.000Z | 2022-02-10T08:05:53.000Z | crabageprediction/venv/Lib/site-packages/pandas/tests/extension/base/dim2.py | 13rianlucero/CrabAgePrediction | 92bc7fbe1040f49e820473e33cc3902a5a7177c7 | [
"MIT"
] | 5 | 2022-02-13T14:38:04.000Z | 2022-02-15T00:13:07.000Z | crabageprediction/venv/Lib/site-packages/pandas/tests/extension/base/dim2.py | 13rianlucero/CrabAgePrediction | 92bc7fbe1040f49e820473e33cc3902a5a7177c7 | [
"MIT"
] | 5 | 2018-04-24T13:31:56.000Z | 2021-10-21T05:06:23.000Z | """
Tests for 2D compatibility.
"""
import numpy as np
import pytest
from pandas._libs.missing import is_matching_na
import pandas as pd
from pandas.core.arrays.integer import INT_STR_TO_DTYPE
from pandas.tests.extension.base.base import BaseExtensionTests
class Dim2CompatTests(BaseExtensionTests):
def test_transpose(self, data):
arr2d = data.repeat(2).reshape(-1, 2)
shape = arr2d.shape
assert shape[0] != shape[-1] # otherwise the rest of the test is useless
assert arr2d.T.shape == shape[::-1]
def test_frame_from_2d_array(self, data):
arr2d = data.repeat(2).reshape(-1, 2)
df = pd.DataFrame(arr2d)
expected = pd.DataFrame({0: arr2d[:, 0], 1: arr2d[:, 1]})
self.assert_frame_equal(df, expected)
def test_swapaxes(self, data):
arr2d = data.repeat(2).reshape(-1, 2)
result = arr2d.swapaxes(0, 1)
expected = arr2d.T
self.assert_extension_array_equal(result, expected)
def test_delete_2d(self, data):
arr2d = data.repeat(3).reshape(-1, 3)
# axis = 0
result = arr2d.delete(1, axis=0)
expected = data.delete(1).repeat(3).reshape(-1, 3)
self.assert_extension_array_equal(result, expected)
# axis = 1
result = arr2d.delete(1, axis=1)
expected = data.repeat(2).reshape(-1, 2)
self.assert_extension_array_equal(result, expected)
def test_take_2d(self, data):
arr2d = data.reshape(-1, 1)
result = arr2d.take([0, 0, -1], axis=0)
expected = data.take([0, 0, -1]).reshape(-1, 1)
self.assert_extension_array_equal(result, expected)
def test_repr_2d(self, data):
# this could fail in a corner case where an element contained the name
res = repr(data.reshape(1, -1))
assert res.count(f"<{type(data).__name__}") == 1
res = repr(data.reshape(-1, 1))
assert res.count(f"<{type(data).__name__}") == 1
def test_reshape(self, data):
arr2d = data.reshape(-1, 1)
assert arr2d.shape == (data.size, 1)
assert len(arr2d) == len(data)
arr2d = data.reshape((-1, 1))
assert arr2d.shape == (data.size, 1)
assert len(arr2d) == len(data)
with pytest.raises(ValueError):
data.reshape((data.size, 2))
with pytest.raises(ValueError):
data.reshape(data.size, 2)
def test_getitem_2d(self, data):
arr2d = data.reshape(1, -1)
result = arr2d[0]
self.assert_extension_array_equal(result, data)
with pytest.raises(IndexError):
arr2d[1]
with pytest.raises(IndexError):
arr2d[-2]
result = arr2d[:]
self.assert_extension_array_equal(result, arr2d)
result = arr2d[:, :]
self.assert_extension_array_equal(result, arr2d)
result = arr2d[:, 0]
expected = data[[0]]
self.assert_extension_array_equal(result, expected)
# dimension-expanding getitem on 1D
result = data[:, np.newaxis]
self.assert_extension_array_equal(result, arr2d.T)
def test_iter_2d(self, data):
arr2d = data.reshape(1, -1)
objs = list(iter(arr2d))
assert len(objs) == arr2d.shape[0]
for obj in objs:
assert isinstance(obj, type(data))
assert obj.dtype == data.dtype
assert obj.ndim == 1
assert len(obj) == arr2d.shape[1]
def test_tolist_2d(self, data):
arr2d = data.reshape(1, -1)
result = arr2d.tolist()
expected = [data.tolist()]
assert isinstance(result, list)
assert all(isinstance(x, list) for x in result)
assert result == expected
def test_concat_2d(self, data):
left = type(data)._concat_same_type([data, data]).reshape(-1, 2)
right = left.copy()
# axis=0
result = left._concat_same_type([left, right], axis=0)
expected = data._concat_same_type([data] * 4).reshape(-1, 2)
self.assert_extension_array_equal(result, expected)
# axis=1
result = left._concat_same_type([left, right], axis=1)
assert result.shape == (len(data), 4)
self.assert_extension_array_equal(result[:, :2], left)
self.assert_extension_array_equal(result[:, 2:], right)
# axis > 1 -> invalid
msg = "axis 2 is out of bounds for array of dimension 2"
with pytest.raises(ValueError, match=msg):
left._concat_same_type([left, right], axis=2)
@pytest.mark.parametrize("method", ["backfill", "pad"])
def test_fillna_2d_method(self, data_missing, method):
arr = data_missing.repeat(2).reshape(2, 2)
assert arr[0].isna().all()
assert not arr[1].isna().any()
result = arr.fillna(method=method)
expected = data_missing.fillna(method=method).repeat(2).reshape(2, 2)
self.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize("method", ["mean", "median", "var", "std", "sum", "prod"])
def test_reductions_2d_axis_none(self, data, method):
arr2d = data.reshape(1, -1)
err_expected = None
err_result = None
try:
expected = getattr(data, method)()
except Exception as err:
# if the 1D reduction is invalid, the 2D reduction should be as well
err_expected = err
try:
result = getattr(arr2d, method)(axis=None)
except Exception as err2:
err_result = err2
else:
result = getattr(arr2d, method)(axis=None)
if err_result is not None or err_expected is not None:
assert type(err_result) == type(err_expected)
return
assert is_matching_na(result, expected) or result == expected
@pytest.mark.parametrize("method", ["mean", "median", "var", "std", "sum", "prod"])
def test_reductions_2d_axis0(self, data, method):
arr2d = data.reshape(1, -1)
kwargs = {}
if method == "std":
# pass ddof=0 so we get all-zero std instead of all-NA std
kwargs["ddof"] = 0
try:
result = getattr(arr2d, method)(axis=0, **kwargs)
except Exception as err:
try:
getattr(data, method)()
except Exception as err2:
assert type(err) == type(err2)
return
else:
raise AssertionError("Both reductions should raise or neither")
def get_reduction_result_dtype(dtype):
# windows and 32bit builds will in some cases have int32/uint32
# where other builds will have int64/uint64.
if dtype.itemsize == 8:
return dtype
elif dtype.kind in "ib":
return INT_STR_TO_DTYPE[np.dtype(int).name]
else:
# i.e. dtype.kind == "u"
return INT_STR_TO_DTYPE[np.dtype(np.uint).name]
if method in ["mean", "median", "sum", "prod"]:
# std and var are not dtype-preserving
expected = data
if method in ["sum", "prod"] and data.dtype.kind in "iub":
dtype = get_reduction_result_dtype(data.dtype)
expected = data.astype(dtype)
if data.dtype.kind == "b" and method in ["sum", "prod"]:
# We get IntegerArray instead of BooleanArray
pass
else:
assert type(expected) == type(data), type(expected)
assert dtype == expected.dtype
self.assert_extension_array_equal(result, expected)
elif method == "std":
self.assert_extension_array_equal(result, data - data)
# punt on method == "var"
@pytest.mark.parametrize("method", ["mean", "median", "var", "std", "sum", "prod"])
def test_reductions_2d_axis1(self, data, method):
arr2d = data.reshape(1, -1)
try:
result = getattr(arr2d, method)(axis=1)
except Exception as err:
try:
getattr(data, method)()
except Exception as err2:
assert type(err) == type(err2)
return
else:
raise AssertionError("Both reductions should raise or neither")
# not necessarily type/dtype-preserving, so weaker assertions
assert result.shape == (1,)
expected_scalar = getattr(data, method)()
res = result[0]
assert is_matching_na(res, expected_scalar) or res == expected_scalar
class NDArrayBacked2DTests(Dim2CompatTests):
# More specific tests for NDArrayBackedExtensionArray subclasses
def test_copy_order(self, data):
# We should be matching numpy semantics for the "order" keyword in 'copy'
arr2d = data.repeat(2).reshape(-1, 2)
assert arr2d._ndarray.flags["C_CONTIGUOUS"]
res = arr2d.copy()
assert res._ndarray.flags["C_CONTIGUOUS"]
res = arr2d[::2, ::2].copy()
assert res._ndarray.flags["C_CONTIGUOUS"]
res = arr2d.copy("F")
assert not res._ndarray.flags["C_CONTIGUOUS"]
assert res._ndarray.flags["F_CONTIGUOUS"]
res = arr2d.copy("K")
assert res._ndarray.flags["C_CONTIGUOUS"]
res = arr2d.T.copy("K")
assert not res._ndarray.flags["C_CONTIGUOUS"]
assert res._ndarray.flags["F_CONTIGUOUS"]
# order not accepted by numpy
msg = r"order must be one of 'C', 'F', 'A', or 'K' \(got 'Q'\)"
with pytest.raises(ValueError, match=msg):
arr2d.copy("Q")
# neither contiguity
arr_nc = arr2d[::2]
assert not arr_nc._ndarray.flags["C_CONTIGUOUS"]
assert not arr_nc._ndarray.flags["F_CONTIGUOUS"]
assert arr_nc.copy()._ndarray.flags["C_CONTIGUOUS"]
assert not arr_nc.copy()._ndarray.flags["F_CONTIGUOUS"]
assert arr_nc.copy("C")._ndarray.flags["C_CONTIGUOUS"]
assert not arr_nc.copy("C")._ndarray.flags["F_CONTIGUOUS"]
assert not arr_nc.copy("F")._ndarray.flags["C_CONTIGUOUS"]
assert arr_nc.copy("F")._ndarray.flags["F_CONTIGUOUS"]
assert arr_nc.copy("K")._ndarray.flags["C_CONTIGUOUS"]
assert not arr_nc.copy("K")._ndarray.flags["F_CONTIGUOUS"]
| 34.31457 | 87 | 0.593361 | import numpy as np
import pytest
from pandas._libs.missing import is_matching_na
import pandas as pd
from pandas.core.arrays.integer import INT_STR_TO_DTYPE
from pandas.tests.extension.base.base import BaseExtensionTests
class Dim2CompatTests(BaseExtensionTests):
def test_transpose(self, data):
arr2d = data.repeat(2).reshape(-1, 2)
shape = arr2d.shape
assert shape[0] != shape[-1]
assert arr2d.T.shape == shape[::-1]
def test_frame_from_2d_array(self, data):
arr2d = data.repeat(2).reshape(-1, 2)
df = pd.DataFrame(arr2d)
expected = pd.DataFrame({0: arr2d[:, 0], 1: arr2d[:, 1]})
self.assert_frame_equal(df, expected)
def test_swapaxes(self, data):
arr2d = data.repeat(2).reshape(-1, 2)
result = arr2d.swapaxes(0, 1)
expected = arr2d.T
self.assert_extension_array_equal(result, expected)
def test_delete_2d(self, data):
arr2d = data.repeat(3).reshape(-1, 3)
result = arr2d.delete(1, axis=0)
expected = data.delete(1).repeat(3).reshape(-1, 3)
self.assert_extension_array_equal(result, expected)
result = arr2d.delete(1, axis=1)
expected = data.repeat(2).reshape(-1, 2)
self.assert_extension_array_equal(result, expected)
def test_take_2d(self, data):
arr2d = data.reshape(-1, 1)
result = arr2d.take([0, 0, -1], axis=0)
expected = data.take([0, 0, -1]).reshape(-1, 1)
self.assert_extension_array_equal(result, expected)
def test_repr_2d(self, data):
res = repr(data.reshape(1, -1))
assert res.count(f"<{type(data).__name__}") == 1
res = repr(data.reshape(-1, 1))
assert res.count(f"<{type(data).__name__}") == 1
def test_reshape(self, data):
arr2d = data.reshape(-1, 1)
assert arr2d.shape == (data.size, 1)
assert len(arr2d) == len(data)
arr2d = data.reshape((-1, 1))
assert arr2d.shape == (data.size, 1)
assert len(arr2d) == len(data)
with pytest.raises(ValueError):
data.reshape((data.size, 2))
with pytest.raises(ValueError):
data.reshape(data.size, 2)
def test_getitem_2d(self, data):
arr2d = data.reshape(1, -1)
result = arr2d[0]
self.assert_extension_array_equal(result, data)
with pytest.raises(IndexError):
arr2d[1]
with pytest.raises(IndexError):
arr2d[-2]
result = arr2d[:]
self.assert_extension_array_equal(result, arr2d)
result = arr2d[:, :]
self.assert_extension_array_equal(result, arr2d)
result = arr2d[:, 0]
expected = data[[0]]
self.assert_extension_array_equal(result, expected)
result = data[:, np.newaxis]
self.assert_extension_array_equal(result, arr2d.T)
def test_iter_2d(self, data):
arr2d = data.reshape(1, -1)
objs = list(iter(arr2d))
assert len(objs) == arr2d.shape[0]
for obj in objs:
assert isinstance(obj, type(data))
assert obj.dtype == data.dtype
assert obj.ndim == 1
assert len(obj) == arr2d.shape[1]
def test_tolist_2d(self, data):
arr2d = data.reshape(1, -1)
result = arr2d.tolist()
expected = [data.tolist()]
assert isinstance(result, list)
assert all(isinstance(x, list) for x in result)
assert result == expected
def test_concat_2d(self, data):
left = type(data)._concat_same_type([data, data]).reshape(-1, 2)
right = left.copy()
result = left._concat_same_type([left, right], axis=0)
expected = data._concat_same_type([data] * 4).reshape(-1, 2)
self.assert_extension_array_equal(result, expected)
result = left._concat_same_type([left, right], axis=1)
assert result.shape == (len(data), 4)
self.assert_extension_array_equal(result[:, :2], left)
self.assert_extension_array_equal(result[:, 2:], right)
msg = "axis 2 is out of bounds for array of dimension 2"
with pytest.raises(ValueError, match=msg):
left._concat_same_type([left, right], axis=2)
@pytest.mark.parametrize("method", ["backfill", "pad"])
def test_fillna_2d_method(self, data_missing, method):
arr = data_missing.repeat(2).reshape(2, 2)
assert arr[0].isna().all()
assert not arr[1].isna().any()
result = arr.fillna(method=method)
expected = data_missing.fillna(method=method).repeat(2).reshape(2, 2)
self.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize("method", ["mean", "median", "var", "std", "sum", "prod"])
def test_reductions_2d_axis_none(self, data, method):
arr2d = data.reshape(1, -1)
err_expected = None
err_result = None
try:
expected = getattr(data, method)()
except Exception as err:
err_expected = err
try:
result = getattr(arr2d, method)(axis=None)
except Exception as err2:
err_result = err2
else:
result = getattr(arr2d, method)(axis=None)
if err_result is not None or err_expected is not None:
assert type(err_result) == type(err_expected)
return
assert is_matching_na(result, expected) or result == expected
@pytest.mark.parametrize("method", ["mean", "median", "var", "std", "sum", "prod"])
def test_reductions_2d_axis0(self, data, method):
arr2d = data.reshape(1, -1)
kwargs = {}
if method == "std":
kwargs["ddof"] = 0
try:
result = getattr(arr2d, method)(axis=0, **kwargs)
except Exception as err:
try:
getattr(data, method)()
except Exception as err2:
assert type(err) == type(err2)
return
else:
raise AssertionError("Both reductions should raise or neither")
def get_reduction_result_dtype(dtype):
if dtype.itemsize == 8:
return dtype
elif dtype.kind in "ib":
return INT_STR_TO_DTYPE[np.dtype(int).name]
else:
return INT_STR_TO_DTYPE[np.dtype(np.uint).name]
if method in ["mean", "median", "sum", "prod"]:
expected = data
if method in ["sum", "prod"] and data.dtype.kind in "iub":
dtype = get_reduction_result_dtype(data.dtype)
expected = data.astype(dtype)
if data.dtype.kind == "b" and method in ["sum", "prod"]:
pass
else:
assert type(expected) == type(data), type(expected)
assert dtype == expected.dtype
self.assert_extension_array_equal(result, expected)
elif method == "std":
self.assert_extension_array_equal(result, data - data)
@pytest.mark.parametrize("method", ["mean", "median", "var", "std", "sum", "prod"])
def test_reductions_2d_axis1(self, data, method):
arr2d = data.reshape(1, -1)
try:
result = getattr(arr2d, method)(axis=1)
except Exception as err:
try:
getattr(data, method)()
except Exception as err2:
assert type(err) == type(err2)
return
else:
raise AssertionError("Both reductions should raise or neither")
assert result.shape == (1,)
expected_scalar = getattr(data, method)()
res = result[0]
assert is_matching_na(res, expected_scalar) or res == expected_scalar
class NDArrayBacked2DTests(Dim2CompatTests):
def test_copy_order(self, data):
arr2d = data.repeat(2).reshape(-1, 2)
assert arr2d._ndarray.flags["C_CONTIGUOUS"]
res = arr2d.copy()
assert res._ndarray.flags["C_CONTIGUOUS"]
res = arr2d[::2, ::2].copy()
assert res._ndarray.flags["C_CONTIGUOUS"]
res = arr2d.copy("F")
assert not res._ndarray.flags["C_CONTIGUOUS"]
assert res._ndarray.flags["F_CONTIGUOUS"]
res = arr2d.copy("K")
assert res._ndarray.flags["C_CONTIGUOUS"]
res = arr2d.T.copy("K")
assert not res._ndarray.flags["C_CONTIGUOUS"]
assert res._ndarray.flags["F_CONTIGUOUS"]
msg = r"order must be one of 'C', 'F', 'A', or 'K' \(got 'Q'\)"
with pytest.raises(ValueError, match=msg):
arr2d.copy("Q")
arr_nc = arr2d[::2]
assert not arr_nc._ndarray.flags["C_CONTIGUOUS"]
assert not arr_nc._ndarray.flags["F_CONTIGUOUS"]
assert arr_nc.copy()._ndarray.flags["C_CONTIGUOUS"]
assert not arr_nc.copy()._ndarray.flags["F_CONTIGUOUS"]
assert arr_nc.copy("C")._ndarray.flags["C_CONTIGUOUS"]
assert not arr_nc.copy("C")._ndarray.flags["F_CONTIGUOUS"]
assert not arr_nc.copy("F")._ndarray.flags["C_CONTIGUOUS"]
assert arr_nc.copy("F")._ndarray.flags["F_CONTIGUOUS"]
assert arr_nc.copy("K")._ndarray.flags["C_CONTIGUOUS"]
assert not arr_nc.copy("K")._ndarray.flags["F_CONTIGUOUS"]
| true | true |
f71f3fc2a1fe0183157711e98d38c579771e94c8 | 40,744 | py | Python | homeassistant/components/media_player/__init__.py | laundrify/core | 60387a417fb82b47700899a6b7e80b30dcc9766f | ["Apache-2.0"] | 2 | 2020-01-03T17:06:33.000Z | 2020-01-13T18:57:32.000Z | homeassistant/components/media_player/__init__.py | laundrify/core | 60387a417fb82b47700899a6b7e80b30dcc9766f | ["Apache-2.0"] | 20 | 2021-11-03T06:22:03.000Z | 2022-03-31T06:21:17.000Z | homeassistant/components/media_player/__init__.py | laundrify/core | 60387a417fb82b47700899a6b7e80b30dcc9766f | ["Apache-2.0"] | null | null | null | """Component to interface with various media players."""
from __future__ import annotations
import asyncio
import base64
import collections
from collections.abc import Callable
from contextlib import suppress
from dataclasses import dataclass
import datetime as dt
import functools as ft
import hashlib
from http import HTTPStatus
import logging
import secrets
from typing import Any, cast, final
from urllib.parse import urlparse
from aiohttp import web
from aiohttp.hdrs import CACHE_CONTROL, CONTENT_TYPE
from aiohttp.typedefs import LooseHeaders
import async_timeout
import voluptuous as vol
from yarl import URL
from homeassistant.backports.enum import StrEnum
from homeassistant.components import websocket_api
from homeassistant.components.http import KEY_AUTHENTICATED, HomeAssistantView
from homeassistant.components.websocket_api.const import (
ERR_NOT_FOUND,
ERR_NOT_SUPPORTED,
ERR_UNKNOWN_ERROR,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PLAY_PAUSE,
SERVICE_MEDIA_PREVIOUS_TRACK,
SERVICE_MEDIA_SEEK,
SERVICE_MEDIA_STOP,
SERVICE_REPEAT_SET,
SERVICE_SHUFFLE_SET,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_VOLUME_DOWN,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_SET,
SERVICE_VOLUME_UP,
STATE_IDLE,
STATE_OFF,
STATE_PLAYING,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
)
from homeassistant.helpers.entity import Entity, EntityDescription
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.network import get_url
from homeassistant.helpers.typing import ConfigType
from homeassistant.loader import bind_hass
from .browse_media import BrowseMedia, async_process_play_media_url # noqa: F401
from .const import ( # noqa: F401
ATTR_APP_ID,
ATTR_APP_NAME,
ATTR_ENTITY_PICTURE_LOCAL,
ATTR_GROUP_MEMBERS,
ATTR_INPUT_SOURCE,
ATTR_INPUT_SOURCE_LIST,
ATTR_MEDIA_ALBUM_ARTIST,
ATTR_MEDIA_ALBUM_NAME,
ATTR_MEDIA_ARTIST,
ATTR_MEDIA_CHANNEL,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_DURATION,
ATTR_MEDIA_ENQUEUE,
ATTR_MEDIA_EPISODE,
ATTR_MEDIA_EXTRA,
ATTR_MEDIA_PLAYLIST,
ATTR_MEDIA_POSITION,
ATTR_MEDIA_POSITION_UPDATED_AT,
ATTR_MEDIA_REPEAT,
ATTR_MEDIA_SEASON,
ATTR_MEDIA_SEEK_POSITION,
ATTR_MEDIA_SERIES_TITLE,
ATTR_MEDIA_SHUFFLE,
ATTR_MEDIA_TITLE,
ATTR_MEDIA_TRACK,
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
ATTR_SOUND_MODE,
ATTR_SOUND_MODE_LIST,
CONTENT_AUTH_EXPIRY_TIME,
DOMAIN,
MEDIA_CLASS_DIRECTORY,
REPEAT_MODES,
SERVICE_CLEAR_PLAYLIST,
SERVICE_JOIN,
SERVICE_PLAY_MEDIA,
SERVICE_SELECT_SOUND_MODE,
SERVICE_SELECT_SOURCE,
SERVICE_UNJOIN,
SUPPORT_BROWSE_MEDIA,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_GROUPING,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_REPEAT_SET,
SUPPORT_SEEK,
SUPPORT_SELECT_SOUND_MODE,
SUPPORT_SELECT_SOURCE,
SUPPORT_SHUFFLE_SET,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
MediaPlayerEntityFeature,
)
from .errors import BrowseError
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
ENTITY_ID_FORMAT = DOMAIN + ".{}"
CACHE_IMAGES = "images"
CACHE_MAXSIZE = "maxsize"
CACHE_LOCK = "lock"
CACHE_URL = "url"
CACHE_CONTENT = "content"
ENTITY_IMAGE_CACHE = {CACHE_IMAGES: collections.OrderedDict(), CACHE_MAXSIZE: 16}
SCAN_INTERVAL = dt.timedelta(seconds=10)
class MediaPlayerEnqueue(StrEnum):
"""Enqueue types for playing media."""
# add given media item to end of the queue
ADD = "add"
# play the given media item next, keep queue
NEXT = "next"
# play the given media item now, keep queue
PLAY = "play"
# play the given media item now, clear queue
REPLACE = "replace"
class MediaPlayerDeviceClass(StrEnum):
"""Device class for media players."""
TV = "tv"
SPEAKER = "speaker"
RECEIVER = "receiver"
DEVICE_CLASSES_SCHEMA = vol.All(vol.Lower, vol.Coerce(MediaPlayerDeviceClass))
# DEVICE_CLASS* below are deprecated as of 2021.12
# use the MediaPlayerDeviceClass enum instead.
DEVICE_CLASSES = [cls.value for cls in MediaPlayerDeviceClass]
DEVICE_CLASS_TV = MediaPlayerDeviceClass.TV.value
DEVICE_CLASS_SPEAKER = MediaPlayerDeviceClass.SPEAKER.value
DEVICE_CLASS_RECEIVER = MediaPlayerDeviceClass.RECEIVER.value
MEDIA_PLAYER_PLAY_MEDIA_SCHEMA = {
vol.Required(ATTR_MEDIA_CONTENT_TYPE): cv.string,
vol.Required(ATTR_MEDIA_CONTENT_ID): cv.string,
vol.Optional(ATTR_MEDIA_ENQUEUE): vol.Any(
cv.boolean, vol.Coerce(MediaPlayerEnqueue)
),
vol.Optional(ATTR_MEDIA_EXTRA, default={}): dict,
}
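# An illustrative payload accepted by this schema (the media id below is
# hypothetical):
#   {ATTR_MEDIA_CONTENT_TYPE: "music",
#    ATTR_MEDIA_CONTENT_ID: "library/track/42",
#    ATTR_MEDIA_ENQUEUE: MediaPlayerEnqueue.NEXT}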
ATTR_TO_PROPERTY = [
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_DURATION,
ATTR_MEDIA_POSITION,
ATTR_MEDIA_POSITION_UPDATED_AT,
ATTR_MEDIA_TITLE,
ATTR_MEDIA_ARTIST,
ATTR_MEDIA_ALBUM_NAME,
ATTR_MEDIA_ALBUM_ARTIST,
ATTR_MEDIA_TRACK,
ATTR_MEDIA_SERIES_TITLE,
ATTR_MEDIA_SEASON,
ATTR_MEDIA_EPISODE,
ATTR_MEDIA_CHANNEL,
ATTR_MEDIA_PLAYLIST,
ATTR_APP_ID,
ATTR_APP_NAME,
ATTR_INPUT_SOURCE,
ATTR_SOUND_MODE,
ATTR_MEDIA_SHUFFLE,
ATTR_MEDIA_REPEAT,
]
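# Every name in ATTR_TO_PROPERTY doubles as a MediaPlayerEntity property;
# state_attributes (defined further below) reflects them into the entity
# state roughly as:
#   for attr in ATTR_TO_PROPERTY:
#       if (value := getattr(self, attr)) is not None:
#           state_attr[attr] = value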
@bind_hass
def is_on(hass, entity_id=None):
"""
Return true if specified media player entity_id is on.
    Check all media players if no entity_id is specified.
"""
entity_ids = [entity_id] if entity_id else hass.states.entity_ids(DOMAIN)
return any(
not hass.states.is_state(entity_id, STATE_OFF) for entity_id in entity_ids
)
def _rename_keys(**keys: Any) -> Callable[[dict[str, Any]], dict[str, Any]]:
"""Create validator that renames keys.
Necessary because the service schema names do not match the command parameters.
Async friendly.
"""
def rename(value: dict[str, Any]) -> dict[str, Any]:
for to_key, from_key in keys.items():
if from_key in value:
value[to_key] = value.pop(from_key)
return value
return rename
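# For example, the validator produced by
# _rename_keys(volume=ATTR_MEDIA_VOLUME_LEVEL) rewrites incoming service data
# such as {"volume_level": 0.5} into {"volume": 0.5}, matching the keyword
# argument expected by async_set_volume_level.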
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Track states and offer events for media_players."""
component = hass.data[DOMAIN] = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL
)
websocket_api.async_register_command(hass, websocket_handle_thumbnail)
websocket_api.async_register_command(hass, websocket_browse_media)
hass.http.register_view(MediaPlayerImageView(component))
await component.async_setup(config)
component.async_register_entity_service(
SERVICE_TURN_ON, {}, "async_turn_on", [MediaPlayerEntityFeature.TURN_ON]
)
component.async_register_entity_service(
SERVICE_TURN_OFF, {}, "async_turn_off", [MediaPlayerEntityFeature.TURN_OFF]
)
component.async_register_entity_service(
SERVICE_TOGGLE,
{},
"async_toggle",
[MediaPlayerEntityFeature.TURN_OFF | MediaPlayerEntityFeature.TURN_ON],
)
component.async_register_entity_service(
SERVICE_VOLUME_UP,
{},
"async_volume_up",
[MediaPlayerEntityFeature.VOLUME_SET, MediaPlayerEntityFeature.VOLUME_STEP],
)
component.async_register_entity_service(
SERVICE_VOLUME_DOWN,
{},
"async_volume_down",
[MediaPlayerEntityFeature.VOLUME_SET, MediaPlayerEntityFeature.VOLUME_STEP],
)
component.async_register_entity_service(
SERVICE_MEDIA_PLAY_PAUSE,
{},
"async_media_play_pause",
[MediaPlayerEntityFeature.PLAY | MediaPlayerEntityFeature.PAUSE],
)
component.async_register_entity_service(
SERVICE_MEDIA_PLAY, {}, "async_media_play", [MediaPlayerEntityFeature.PLAY]
)
component.async_register_entity_service(
SERVICE_MEDIA_PAUSE, {}, "async_media_pause", [MediaPlayerEntityFeature.PAUSE]
)
component.async_register_entity_service(
SERVICE_MEDIA_STOP, {}, "async_media_stop", [MediaPlayerEntityFeature.STOP]
)
component.async_register_entity_service(
SERVICE_MEDIA_NEXT_TRACK,
{},
"async_media_next_track",
[MediaPlayerEntityFeature.NEXT_TRACK],
)
component.async_register_entity_service(
SERVICE_MEDIA_PREVIOUS_TRACK,
{},
"async_media_previous_track",
[MediaPlayerEntityFeature.PREVIOUS_TRACK],
)
component.async_register_entity_service(
SERVICE_CLEAR_PLAYLIST,
{},
"async_clear_playlist",
[MediaPlayerEntityFeature.CLEAR_PLAYLIST],
)
component.async_register_entity_service(
SERVICE_VOLUME_SET,
vol.All(
cv.make_entity_service_schema(
{vol.Required(ATTR_MEDIA_VOLUME_LEVEL): cv.small_float}
),
_rename_keys(volume=ATTR_MEDIA_VOLUME_LEVEL),
),
"async_set_volume_level",
[MediaPlayerEntityFeature.VOLUME_SET],
)
component.async_register_entity_service(
SERVICE_VOLUME_MUTE,
vol.All(
cv.make_entity_service_schema(
{vol.Required(ATTR_MEDIA_VOLUME_MUTED): cv.boolean}
),
_rename_keys(mute=ATTR_MEDIA_VOLUME_MUTED),
),
"async_mute_volume",
[MediaPlayerEntityFeature.VOLUME_MUTE],
)
component.async_register_entity_service(
SERVICE_MEDIA_SEEK,
vol.All(
cv.make_entity_service_schema(
{vol.Required(ATTR_MEDIA_SEEK_POSITION): cv.positive_float}
),
_rename_keys(position=ATTR_MEDIA_SEEK_POSITION),
),
"async_media_seek",
[MediaPlayerEntityFeature.SEEK],
)
component.async_register_entity_service(
SERVICE_JOIN,
{vol.Required(ATTR_GROUP_MEMBERS): vol.All(cv.ensure_list, [cv.entity_id])},
"async_join_players",
[MediaPlayerEntityFeature.GROUPING],
)
component.async_register_entity_service(
SERVICE_SELECT_SOURCE,
{vol.Required(ATTR_INPUT_SOURCE): cv.string},
"async_select_source",
[MediaPlayerEntityFeature.SELECT_SOURCE],
)
component.async_register_entity_service(
SERVICE_SELECT_SOUND_MODE,
{vol.Required(ATTR_SOUND_MODE): cv.string},
"async_select_sound_mode",
[MediaPlayerEntityFeature.SELECT_SOUND_MODE],
)
# Remove in Home Assistant 2022.9
def _rewrite_enqueue(value):
"""Rewrite the enqueue value."""
if ATTR_MEDIA_ENQUEUE not in value:
pass
elif value[ATTR_MEDIA_ENQUEUE] is True:
value[ATTR_MEDIA_ENQUEUE] = MediaPlayerEnqueue.ADD
_LOGGER.warning(
"Playing media with enqueue set to True is deprecated. Use 'add' instead"
)
elif value[ATTR_MEDIA_ENQUEUE] is False:
value[ATTR_MEDIA_ENQUEUE] = MediaPlayerEnqueue.PLAY
_LOGGER.warning(
"Playing media with enqueue set to False is deprecated. Use 'play' instead"
)
return value
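    # The shim above keeps pre-2022.9 callers working: a boolean enqueue of
    # True is rewritten to MediaPlayerEnqueue.ADD ("add") and False to
    # MediaPlayerEnqueue.PLAY ("play"), each with a deprecation warning,
    # before the value reaches async_play_media.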
component.async_register_entity_service(
SERVICE_PLAY_MEDIA,
vol.All(
cv.make_entity_service_schema(MEDIA_PLAYER_PLAY_MEDIA_SCHEMA),
_rewrite_enqueue,
_rename_keys(
media_type=ATTR_MEDIA_CONTENT_TYPE,
media_id=ATTR_MEDIA_CONTENT_ID,
enqueue=ATTR_MEDIA_ENQUEUE,
),
),
"async_play_media",
[MediaPlayerEntityFeature.PLAY_MEDIA],
)
component.async_register_entity_service(
SERVICE_SHUFFLE_SET,
{vol.Required(ATTR_MEDIA_SHUFFLE): cv.boolean},
"async_set_shuffle",
[MediaPlayerEntityFeature.SHUFFLE_SET],
)
component.async_register_entity_service(
SERVICE_UNJOIN, {}, "async_unjoin_player", [MediaPlayerEntityFeature.GROUPING]
)
component.async_register_entity_service(
SERVICE_REPEAT_SET,
{vol.Required(ATTR_MEDIA_REPEAT): vol.In(REPEAT_MODES)},
"async_set_repeat",
[MediaPlayerEntityFeature.REPEAT_SET],
)
return True
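# A rough usage sketch of one of the services registered above (the entity id
# is hypothetical):
#
#   await hass.services.async_call(
#       DOMAIN,
#       SERVICE_VOLUME_SET,
#       {"entity_id": "media_player.living_room", ATTR_MEDIA_VOLUME_LEVEL: 0.4},
#       blocking=True,
#   )
#
# The registered schema renames volume_level to the "volume" kwarg of
# async_set_volume_level and rejects players lacking VOLUME_SET support.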
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up a config entry."""
component: EntityComponent = hass.data[DOMAIN]
return await component.async_setup_entry(entry)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
component: EntityComponent = hass.data[DOMAIN]
return await component.async_unload_entry(entry)
@dataclass
class MediaPlayerEntityDescription(EntityDescription):
"""A class that describes media player entities."""
device_class: MediaPlayerDeviceClass | str | None = None
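# A platform could describe its entities with, for example (hypothetical
# integration code):
#   MediaPlayerEntityDescription(
#       key="living_room_tv",
#       name="Living room TV",
#       device_class=MediaPlayerDeviceClass.TV,
#   )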
class MediaPlayerEntity(Entity):
"""ABC for media player entities."""
entity_description: MediaPlayerEntityDescription
_access_token: str | None = None
_attr_app_id: str | None = None
_attr_app_name: str | None = None
_attr_device_class: MediaPlayerDeviceClass | str | None
_attr_group_members: list[str] | None = None
_attr_is_volume_muted: bool | None = None
_attr_media_album_artist: str | None = None
_attr_media_album_name: str | None = None
_attr_media_artist: str | None = None
_attr_media_channel: str | None = None
_attr_media_content_id: str | None = None
_attr_media_content_type: str | None = None
_attr_media_duration: int | None = None
_attr_media_episode: str | None = None
_attr_media_image_hash: str | None
_attr_media_image_remotely_accessible: bool = False
_attr_media_image_url: str | None = None
_attr_media_playlist: str | None = None
_attr_media_position_updated_at: dt.datetime | None = None
_attr_media_position: int | None = None
_attr_media_season: str | None = None
_attr_media_series_title: str | None = None
_attr_media_title: str | None = None
_attr_media_track: int | None = None
_attr_repeat: str | None = None
_attr_shuffle: bool | None = None
_attr_sound_mode_list: list[str] | None = None
_attr_sound_mode: str | None = None
_attr_source_list: list[str] | None = None
_attr_source: str | None = None
_attr_state: str | None = None
_attr_supported_features: int = 0
_attr_volume_level: float | None = None
# Implement these for your media player
@property
def device_class(self) -> MediaPlayerDeviceClass | str | None:
"""Return the class of this entity."""
if hasattr(self, "_attr_device_class"):
return self._attr_device_class
if hasattr(self, "entity_description"):
return self.entity_description.device_class
return None
@property
def state(self) -> str | None:
"""State of the player."""
return self._attr_state
@property
def access_token(self) -> str:
"""Access token for this media player."""
if self._access_token is None:
self._access_token = secrets.token_hex(32)
return self._access_token
@property
def volume_level(self) -> float | None:
"""Volume level of the media player (0..1)."""
return self._attr_volume_level
@property
def is_volume_muted(self) -> bool | None:
"""Boolean if volume is currently muted."""
return self._attr_is_volume_muted
@property
def media_content_id(self) -> str | None:
"""Content ID of current playing media."""
return self._attr_media_content_id
@property
def media_content_type(self) -> str | None:
"""Content type of current playing media."""
return self._attr_media_content_type
@property
def media_duration(self) -> int | None:
"""Duration of current playing media in seconds."""
return self._attr_media_duration
@property
def media_position(self) -> int | None:
"""Position of current playing media in seconds."""
return self._attr_media_position
@property
def media_position_updated_at(self) -> dt.datetime | None:
"""When was the position of the current playing media valid.
Returns value from homeassistant.util.dt.utcnow().
"""
return self._attr_media_position_updated_at
@property
def media_image_url(self) -> str | None:
"""Image url of current playing media."""
return self._attr_media_image_url
@property
def media_image_remotely_accessible(self) -> bool:
"""If the image url is remotely accessible."""
return self._attr_media_image_remotely_accessible
@property
def media_image_hash(self) -> str | None:
"""Hash value for media image."""
if hasattr(self, "_attr_media_image_hash"):
return self._attr_media_image_hash
if (url := self.media_image_url) is not None:
return hashlib.sha256(url.encode("utf-8")).hexdigest()[:16]
return None
async def async_get_media_image(self) -> tuple[bytes | None, str | None]:
"""Fetch media image of current playing image."""
if (url := self.media_image_url) is None:
return None, None
return await self._async_fetch_image_from_cache(url)
async def async_get_browse_image(
self,
media_content_type: str,
media_content_id: str,
media_image_id: str | None = None,
) -> tuple[bytes | None, str | None]:
"""
Optionally fetch internally accessible image for media browser.
Must be implemented by integration.
"""
return None, None
@property
def media_title(self) -> str | None:
"""Title of current playing media."""
return self._attr_media_title
@property
def media_artist(self) -> str | None:
"""Artist of current playing media, music track only."""
return self._attr_media_artist
@property
def media_album_name(self) -> str | None:
"""Album name of current playing media, music track only."""
return self._attr_media_album_name
@property
def media_album_artist(self) -> str | None:
"""Album artist of current playing media, music track only."""
return self._attr_media_album_artist
@property
def media_track(self) -> int | None:
"""Track number of current playing media, music track only."""
return self._attr_media_track
@property
def media_series_title(self) -> str | None:
"""Title of series of current playing media, TV show only."""
return self._attr_media_series_title
@property
def media_season(self) -> str | None:
"""Season of current playing media, TV show only."""
return self._attr_media_season
@property
def media_episode(self) -> str | None:
"""Episode of current playing media, TV show only."""
return self._attr_media_episode
@property
def media_channel(self) -> str | None:
"""Channel currently playing."""
return self._attr_media_channel
@property
def media_playlist(self) -> str | None:
"""Title of Playlist currently playing."""
return self._attr_media_playlist
@property
def app_id(self) -> str | None:
"""ID of the current running app."""
return self._attr_app_id
@property
def app_name(self) -> str | None:
"""Name of the current running app."""
return self._attr_app_name
@property
def source(self) -> str | None:
"""Name of the current input source."""
return self._attr_source
@property
def source_list(self) -> list[str] | None:
"""List of available input sources."""
return self._attr_source_list
@property
def sound_mode(self) -> str | None:
"""Name of the current sound mode."""
return self._attr_sound_mode
@property
def sound_mode_list(self) -> list[str] | None:
"""List of available sound modes."""
return self._attr_sound_mode_list
@property
def shuffle(self) -> bool | None:
"""Boolean if shuffle is enabled."""
return self._attr_shuffle
@property
def repeat(self) -> str | None:
"""Return current repeat mode."""
return self._attr_repeat
@property
def group_members(self) -> list[str] | None:
"""List of members which are currently grouped together."""
return self._attr_group_members
@property
def supported_features(self) -> int:
"""Flag media player features that are supported."""
return self._attr_supported_features
def turn_on(self):
"""Turn the media player on."""
raise NotImplementedError()
async def async_turn_on(self):
"""Turn the media player on."""
await self.hass.async_add_executor_job(self.turn_on)
def turn_off(self):
"""Turn the media player off."""
raise NotImplementedError()
async def async_turn_off(self):
"""Turn the media player off."""
await self.hass.async_add_executor_job(self.turn_off)
def mute_volume(self, mute):
"""Mute the volume."""
raise NotImplementedError()
async def async_mute_volume(self, mute):
"""Mute the volume."""
await self.hass.async_add_executor_job(self.mute_volume, mute)
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
raise NotImplementedError()
async def async_set_volume_level(self, volume):
"""Set volume level, range 0..1."""
await self.hass.async_add_executor_job(self.set_volume_level, volume)
def media_play(self):
"""Send play command."""
raise NotImplementedError()
async def async_media_play(self):
"""Send play command."""
await self.hass.async_add_executor_job(self.media_play)
def media_pause(self):
"""Send pause command."""
raise NotImplementedError()
async def async_media_pause(self):
"""Send pause command."""
await self.hass.async_add_executor_job(self.media_pause)
def media_stop(self):
"""Send stop command."""
raise NotImplementedError()
async def async_media_stop(self):
"""Send stop command."""
await self.hass.async_add_executor_job(self.media_stop)
def media_previous_track(self):
"""Send previous track command."""
raise NotImplementedError()
async def async_media_previous_track(self):
"""Send previous track command."""
await self.hass.async_add_executor_job(self.media_previous_track)
def media_next_track(self):
"""Send next track command."""
raise NotImplementedError()
async def async_media_next_track(self):
"""Send next track command."""
await self.hass.async_add_executor_job(self.media_next_track)
def media_seek(self, position):
"""Send seek command."""
raise NotImplementedError()
async def async_media_seek(self, position):
"""Send seek command."""
await self.hass.async_add_executor_job(self.media_seek, position)
def play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
raise NotImplementedError()
async def async_play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
await self.hass.async_add_executor_job(
ft.partial(self.play_media, media_type, media_id, **kwargs)
)
def select_source(self, source):
"""Select input source."""
raise NotImplementedError()
async def async_select_source(self, source):
"""Select input source."""
await self.hass.async_add_executor_job(self.select_source, source)
def select_sound_mode(self, sound_mode):
"""Select sound mode."""
raise NotImplementedError()
async def async_select_sound_mode(self, sound_mode):
"""Select sound mode."""
await self.hass.async_add_executor_job(self.select_sound_mode, sound_mode)
def clear_playlist(self):
"""Clear players playlist."""
raise NotImplementedError()
async def async_clear_playlist(self):
"""Clear players playlist."""
await self.hass.async_add_executor_job(self.clear_playlist)
def set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
raise NotImplementedError()
async def async_set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
await self.hass.async_add_executor_job(self.set_shuffle, shuffle)
def set_repeat(self, repeat):
"""Set repeat mode."""
raise NotImplementedError()
async def async_set_repeat(self, repeat):
"""Set repeat mode."""
await self.hass.async_add_executor_job(self.set_repeat, repeat)
# No need to overwrite these.
@property
def support_play(self):
"""Boolean if play is supported."""
return bool(self.supported_features & MediaPlayerEntityFeature.PLAY)
@property
def support_pause(self):
"""Boolean if pause is supported."""
return bool(self.supported_features & MediaPlayerEntityFeature.PAUSE)
@property
def support_stop(self):
"""Boolean if stop is supported."""
return bool(self.supported_features & MediaPlayerEntityFeature.STOP)
@property
def support_seek(self):
"""Boolean if seek is supported."""
return bool(self.supported_features & MediaPlayerEntityFeature.SEEK)
@property
def support_volume_set(self):
"""Boolean if setting volume is supported."""
return bool(self.supported_features & MediaPlayerEntityFeature.VOLUME_SET)
@property
def support_volume_mute(self):
"""Boolean if muting volume is supported."""
return bool(self.supported_features & MediaPlayerEntityFeature.VOLUME_MUTE)
@property
def support_previous_track(self):
"""Boolean if previous track command supported."""
return bool(self.supported_features & MediaPlayerEntityFeature.PREVIOUS_TRACK)
@property
def support_next_track(self):
"""Boolean if next track command supported."""
return bool(self.supported_features & MediaPlayerEntityFeature.NEXT_TRACK)
@property
def support_play_media(self):
"""Boolean if play media command supported."""
return bool(self.supported_features & MediaPlayerEntityFeature.PLAY_MEDIA)
@property
def support_select_source(self):
"""Boolean if select source command supported."""
return bool(self.supported_features & MediaPlayerEntityFeature.SELECT_SOURCE)
@property
def support_select_sound_mode(self):
"""Boolean if select sound mode command supported."""
return bool(
self.supported_features & MediaPlayerEntityFeature.SELECT_SOUND_MODE
)
@property
def support_clear_playlist(self):
"""Boolean if clear playlist command supported."""
return bool(self.supported_features & MediaPlayerEntityFeature.CLEAR_PLAYLIST)
@property
def support_shuffle_set(self):
"""Boolean if shuffle is supported."""
return bool(self.supported_features & MediaPlayerEntityFeature.SHUFFLE_SET)
@property
def support_grouping(self):
"""Boolean if player grouping is supported."""
return bool(self.supported_features & MediaPlayerEntityFeature.GROUPING)
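    # All of the support_* helpers above are plain bitwise tests against
    # supported_features; an integration advertising play, pause and volume
    # control would typically set (illustrative only):
    #   _attr_supported_features = (
    #       MediaPlayerEntityFeature.PLAY
    #       | MediaPlayerEntityFeature.PAUSE
    #       | MediaPlayerEntityFeature.VOLUME_SET
    #   )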
async def async_toggle(self):
"""Toggle the power on the media player."""
if hasattr(self, "toggle"):
await self.hass.async_add_executor_job(self.toggle)
return
if self.state in (STATE_OFF, STATE_IDLE):
await self.async_turn_on()
else:
await self.async_turn_off()
async def async_volume_up(self):
"""Turn volume up for media player.
This method is a coroutine.
"""
if hasattr(self, "volume_up"):
await self.hass.async_add_executor_job(self.volume_up)
return
if (
self.volume_level < 1
and self.supported_features & MediaPlayerEntityFeature.VOLUME_SET
):
await self.async_set_volume_level(min(1, self.volume_level + 0.1))
async def async_volume_down(self):
"""Turn volume down for media player.
This method is a coroutine.
"""
if hasattr(self, "volume_down"):
await self.hass.async_add_executor_job(self.volume_down)
return
if (
self.volume_level > 0
and self.supported_features & MediaPlayerEntityFeature.VOLUME_SET
):
await self.async_set_volume_level(max(0, self.volume_level - 0.1))
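    # Both step helpers above prefer a native volume_up/volume_down method if
    # the integration provides one; otherwise they fall back to
    # async_set_volume_level with a fixed 0.1 (10%) step, clamped to the
    # 0..1 range.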
async def async_media_play_pause(self):
"""Play or pause the media player."""
if hasattr(self, "media_play_pause"):
await self.hass.async_add_executor_job(self.media_play_pause)
return
if self.state == STATE_PLAYING:
await self.async_media_pause()
else:
await self.async_media_play()
@property
def entity_picture(self):
"""Return image of the media playing."""
if self.state == STATE_OFF:
return None
if self.media_image_remotely_accessible:
return self.media_image_url
return self.media_image_local
@property
def media_image_local(self):
"""Return local url to media image."""
if (image_hash := self.media_image_hash) is None:
return None
return (
f"/api/media_player_proxy/{self.entity_id}?"
f"token={self.access_token}&cache={image_hash}"
)
@property
def capability_attributes(self):
"""Return capability attributes."""
supported_features = self.supported_features or 0
data = {}
if supported_features & MediaPlayerEntityFeature.SELECT_SOURCE and (
source_list := self.source_list
):
data[ATTR_INPUT_SOURCE_LIST] = source_list
if supported_features & MediaPlayerEntityFeature.SELECT_SOUND_MODE and (
sound_mode_list := self.sound_mode_list
):
data[ATTR_SOUND_MODE_LIST] = sound_mode_list
return data
@final
@property
def state_attributes(self):
"""Return the state attributes."""
state_attr = {}
if self.support_grouping:
state_attr[ATTR_GROUP_MEMBERS] = self.group_members
if self.state == STATE_OFF:
return state_attr
for attr in ATTR_TO_PROPERTY:
if (value := getattr(self, attr)) is not None:
state_attr[attr] = value
if self.media_image_remotely_accessible:
state_attr[ATTR_ENTITY_PICTURE_LOCAL] = self.media_image_local
return state_attr
async def async_browse_media(
self,
media_content_type: str | None = None,
media_content_id: str | None = None,
) -> BrowseMedia:
"""Return a BrowseMedia instance.
The BrowseMedia instance will be used by the
"media_player/browse_media" websocket command.
"""
raise NotImplementedError()
def join_players(self, group_members):
"""Join `group_members` as a player group with the current player."""
raise NotImplementedError()
async def async_join_players(self, group_members):
"""Join `group_members` as a player group with the current player."""
await self.hass.async_add_executor_job(self.join_players, group_members)
def unjoin_player(self):
"""Remove this player from any group."""
raise NotImplementedError()
async def async_unjoin_player(self):
"""Remove this player from any group."""
await self.hass.async_add_executor_job(self.unjoin_player)
async def _async_fetch_image_from_cache(
self, url: str
) -> tuple[bytes | None, str | None]:
"""Fetch image.
Images are cached in memory (the images are typically 10-100kB in size).
"""
cache_images = cast(collections.OrderedDict, ENTITY_IMAGE_CACHE[CACHE_IMAGES])
cache_maxsize = cast(int, ENTITY_IMAGE_CACHE[CACHE_MAXSIZE])
if urlparse(url).hostname is None:
url = f"{get_url(self.hass)}{url}"
if url not in cache_images:
cache_images[url] = {CACHE_LOCK: asyncio.Lock()}
async with cache_images[url][CACHE_LOCK]:
if CACHE_CONTENT in cache_images[url]:
return cache_images[url][CACHE_CONTENT] # type:ignore[no-any-return]
(content, content_type) = await self._async_fetch_image(url)
async with cache_images[url][CACHE_LOCK]:
cache_images[url][CACHE_CONTENT] = content, content_type
while len(cache_images) > cache_maxsize:
cache_images.popitem(last=False)
return content, content_type
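    # The image cache is a module-level OrderedDict capped at CACHE_MAXSIZE
    # (16) entries; popitem(last=False) evicts the oldest URL first, and the
    # per-URL asyncio.Lock ensures a given image is only fetched once even
    # under concurrent requests.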
async def _async_fetch_image(self, url: str) -> tuple[bytes | None, str | None]:
"""Retrieve an image."""
return await async_fetch_image(_LOGGER, self.hass, url)
def get_browse_image_url(
self,
media_content_type: str,
media_content_id: str,
media_image_id: str | None = None,
) -> str:
"""Generate an url for a media browser image."""
url_path = (
f"/api/media_player_proxy/{self.entity_id}/browse_media"
f"/{media_content_type}/{media_content_id}"
)
url_query = {"token": self.access_token}
if media_image_id:
url_query["media_image_id"] = media_image_id
return str(URL(url_path).with_query(url_query))
class MediaPlayerImageView(HomeAssistantView):
"""Media player view to serve an image."""
requires_auth = False
url = "/api/media_player_proxy/{entity_id}"
name = "api:media_player:image"
extra_urls = [
url + "/browse_media/{media_content_type}/{media_content_id}",
]
def __init__(self, component: EntityComponent) -> None:
"""Initialize a media player view."""
self.component = component
async def get(
self,
request: web.Request,
entity_id: str,
media_content_type: str | None = None,
media_content_id: str | None = None,
) -> web.Response:
"""Start a get request."""
if (player := self.component.get_entity(entity_id)) is None:
status = (
HTTPStatus.NOT_FOUND
if request[KEY_AUTHENTICATED]
else HTTPStatus.UNAUTHORIZED
)
return web.Response(status=status)
assert isinstance(player, MediaPlayerEntity)
authenticated = (
request[KEY_AUTHENTICATED]
or request.query.get("token") == player.access_token
)
if not authenticated:
return web.Response(status=HTTPStatus.UNAUTHORIZED)
if media_content_type and media_content_id:
media_image_id = request.query.get("media_image_id")
data, content_type = await player.async_get_browse_image(
media_content_type, media_content_id, media_image_id
)
else:
data, content_type = await player.async_get_media_image()
if data is None:
return web.Response(status=HTTPStatus.INTERNAL_SERVER_ERROR)
headers: LooseHeaders = {CACHE_CONTROL: "max-age=3600"}
return web.Response(body=data, content_type=content_type, headers=headers)
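    # Unauthenticated requests are still served when they present the entity's
    # access token as a query parameter, e.g. (illustrative URL):
    #   /api/media_player_proxy/media_player.living_room?token=<access_token>
    # which matches the URLs generated by media_image_local above.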
@websocket_api.websocket_command(
{
vol.Required("type"): "media_player_thumbnail",
vol.Required("entity_id"): cv.entity_id,
}
)
@websocket_api.async_response
async def websocket_handle_thumbnail(hass, connection, msg):
"""Handle get media player cover command.
Async friendly.
"""
component = hass.data[DOMAIN]
if (player := component.get_entity(msg["entity_id"])) is None:
connection.send_message(
websocket_api.error_message(msg["id"], ERR_NOT_FOUND, "Entity not found")
)
return
_LOGGER.warning(
"The websocket command media_player_thumbnail is deprecated. Use /api/media_player_proxy instead"
)
data, content_type = await player.async_get_media_image()
if data is None:
connection.send_message(
websocket_api.error_message(
msg["id"], "thumbnail_fetch_failed", "Failed to fetch thumbnail"
)
)
return
await connection.send_big_result(
msg["id"],
{
"content_type": content_type,
"content": base64.b64encode(data).decode("utf-8"),
},
)
@websocket_api.websocket_command(
{
vol.Required("type"): "media_player/browse_media",
vol.Required("entity_id"): cv.entity_id,
vol.Inclusive(
ATTR_MEDIA_CONTENT_TYPE,
"media_ids",
"media_content_type and media_content_id must be provided together",
): str,
vol.Inclusive(
ATTR_MEDIA_CONTENT_ID,
"media_ids",
"media_content_type and media_content_id must be provided together",
): str,
}
)
@websocket_api.async_response
async def websocket_browse_media(hass, connection, msg):
"""
Browse media available to the media_player entity.
To use, media_player integrations can implement MediaPlayerEntity.async_browse_media()
"""
component = hass.data[DOMAIN]
player: MediaPlayerEntity | None = component.get_entity(msg["entity_id"])
if player is None:
connection.send_error(msg["id"], "entity_not_found", "Entity not found")
return
if not player.supported_features & MediaPlayerEntityFeature.BROWSE_MEDIA:
connection.send_message(
websocket_api.error_message(
msg["id"], ERR_NOT_SUPPORTED, "Player does not support browsing media"
)
)
return
media_content_type = msg.get(ATTR_MEDIA_CONTENT_TYPE)
media_content_id = msg.get(ATTR_MEDIA_CONTENT_ID)
try:
payload = await player.async_browse_media(media_content_type, media_content_id)
except NotImplementedError:
_LOGGER.error(
"%s allows media browsing but its integration (%s) does not",
player.entity_id,
player.platform.platform_name,
)
connection.send_message(
websocket_api.error_message(
msg["id"],
ERR_NOT_SUPPORTED,
"Integration does not support browsing media",
)
)
return
except BrowseError as err:
connection.send_message(
websocket_api.error_message(msg["id"], ERR_UNKNOWN_ERROR, str(err))
)
return
# For backwards compat
if isinstance(payload, BrowseMedia):
payload = payload.as_dict()
else:
_LOGGER.warning("Browse Media should use new BrowseMedia class")
connection.send_result(msg["id"], payload)
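# Integrations normally return a BrowseMedia tree from async_browse_media; a
# minimal illustrative root node could look like:
#   BrowseMedia(
#       media_class=MEDIA_CLASS_DIRECTORY,
#       media_content_id="library",
#       media_content_type="library",
#       title="Library",
#       can_play=False,
#       can_expand=True,
#   )
# Returning a bare dict still works for backwards compatibility but logs the
# warning above.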
async def async_fetch_image(
logger: logging.Logger, hass: HomeAssistant, url: str
) -> tuple[bytes | None, str | None]:
"""Retrieve an image."""
content, content_type = (None, None)
websession = async_get_clientsession(hass)
with suppress(asyncio.TimeoutError), async_timeout.timeout(10):
response = await websession.get(url)
if response.status == HTTPStatus.OK:
content = await response.read()
if content_type := response.headers.get(CONTENT_TYPE):
content_type = content_type.split(";")[0]
if content is None:
url_parts = URL(url)
if url_parts.user is not None:
url_parts = url_parts.with_user("xxxx")
if url_parts.password is not None:
url_parts = url_parts.with_password("xxxxxxxx")
url = str(url_parts)
logger.warning("Error retrieving proxied image from %s", url)
return content, content_type
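# On failure the offending URL is logged with any credentials masked: an
# illustrative "http://user:secret@cam.local/image.jpg" would be reported as
# "http://xxxx:xxxxxxxx@cam.local/image.jpg".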
| 32.132492 | 105 | 0.669964 | from __future__ import annotations
import asyncio
import base64
import collections
from collections.abc import Callable
from contextlib import suppress
from dataclasses import dataclass
import datetime as dt
import functools as ft
import hashlib
from http import HTTPStatus
import logging
import secrets
from typing import Any, cast, final
from urllib.parse import urlparse
from aiohttp import web
from aiohttp.hdrs import CACHE_CONTROL, CONTENT_TYPE
from aiohttp.typedefs import LooseHeaders
import async_timeout
import voluptuous as vol
from yarl import URL
from homeassistant.backports.enum import StrEnum
from homeassistant.components import websocket_api
from homeassistant.components.http import KEY_AUTHENTICATED, HomeAssistantView
from homeassistant.components.websocket_api.const import (
ERR_NOT_FOUND,
ERR_NOT_SUPPORTED,
ERR_UNKNOWN_ERROR,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PLAY_PAUSE,
SERVICE_MEDIA_PREVIOUS_TRACK,
SERVICE_MEDIA_SEEK,
SERVICE_MEDIA_STOP,
SERVICE_REPEAT_SET,
SERVICE_SHUFFLE_SET,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_VOLUME_DOWN,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_SET,
SERVICE_VOLUME_UP,
STATE_IDLE,
STATE_OFF,
STATE_PLAYING,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import (
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
)
from homeassistant.helpers.entity import Entity, EntityDescription
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.network import get_url
from homeassistant.helpers.typing import ConfigType
from homeassistant.loader import bind_hass
from .browse_media import BrowseMedia, async_process_play_media_url
from .const import (
ATTR_APP_ID,
ATTR_APP_NAME,
ATTR_ENTITY_PICTURE_LOCAL,
ATTR_GROUP_MEMBERS,
ATTR_INPUT_SOURCE,
ATTR_INPUT_SOURCE_LIST,
ATTR_MEDIA_ALBUM_ARTIST,
ATTR_MEDIA_ALBUM_NAME,
ATTR_MEDIA_ARTIST,
ATTR_MEDIA_CHANNEL,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_DURATION,
ATTR_MEDIA_ENQUEUE,
ATTR_MEDIA_EPISODE,
ATTR_MEDIA_EXTRA,
ATTR_MEDIA_PLAYLIST,
ATTR_MEDIA_POSITION,
ATTR_MEDIA_POSITION_UPDATED_AT,
ATTR_MEDIA_REPEAT,
ATTR_MEDIA_SEASON,
ATTR_MEDIA_SEEK_POSITION,
ATTR_MEDIA_SERIES_TITLE,
ATTR_MEDIA_SHUFFLE,
ATTR_MEDIA_TITLE,
ATTR_MEDIA_TRACK,
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
ATTR_SOUND_MODE,
ATTR_SOUND_MODE_LIST,
CONTENT_AUTH_EXPIRY_TIME,
DOMAIN,
MEDIA_CLASS_DIRECTORY,
REPEAT_MODES,
SERVICE_CLEAR_PLAYLIST,
SERVICE_JOIN,
SERVICE_PLAY_MEDIA,
SERVICE_SELECT_SOUND_MODE,
SERVICE_SELECT_SOURCE,
SERVICE_UNJOIN,
SUPPORT_BROWSE_MEDIA,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_GROUPING,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_REPEAT_SET,
SUPPORT_SEEK,
SUPPORT_SELECT_SOUND_MODE,
SUPPORT_SELECT_SOURCE,
SUPPORT_SHUFFLE_SET,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
MediaPlayerEntityFeature,
)
from .errors import BrowseError
_LOGGER = logging.getLogger(__name__)
ENTITY_ID_FORMAT = DOMAIN + ".{}"
CACHE_IMAGES = "images"
CACHE_MAXSIZE = "maxsize"
CACHE_LOCK = "lock"
CACHE_URL = "url"
CACHE_CONTENT = "content"
ENTITY_IMAGE_CACHE = {CACHE_IMAGES: collections.OrderedDict(), CACHE_MAXSIZE: 16}
SCAN_INTERVAL = dt.timedelta(seconds=10)
class MediaPlayerEnqueue(StrEnum):
ADD = "add"
NEXT = "next"
PLAY = "play"
REPLACE = "replace"
class MediaPlayerDeviceClass(StrEnum):
TV = "tv"
SPEAKER = "speaker"
RECEIVER = "receiver"
DEVICE_CLASSES_SCHEMA = vol.All(vol.Lower, vol.Coerce(MediaPlayerDeviceClass))
DEVICE_CLASSES = [cls.value for cls in MediaPlayerDeviceClass]
DEVICE_CLASS_TV = MediaPlayerDeviceClass.TV.value
DEVICE_CLASS_SPEAKER = MediaPlayerDeviceClass.SPEAKER.value
DEVICE_CLASS_RECEIVER = MediaPlayerDeviceClass.RECEIVER.value
MEDIA_PLAYER_PLAY_MEDIA_SCHEMA = {
vol.Required(ATTR_MEDIA_CONTENT_TYPE): cv.string,
vol.Required(ATTR_MEDIA_CONTENT_ID): cv.string,
vol.Optional(ATTR_MEDIA_ENQUEUE): vol.Any(
cv.boolean, vol.Coerce(MediaPlayerEnqueue)
),
vol.Optional(ATTR_MEDIA_EXTRA, default={}): dict,
}
ATTR_TO_PROPERTY = [
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_DURATION,
ATTR_MEDIA_POSITION,
ATTR_MEDIA_POSITION_UPDATED_AT,
ATTR_MEDIA_TITLE,
ATTR_MEDIA_ARTIST,
ATTR_MEDIA_ALBUM_NAME,
ATTR_MEDIA_ALBUM_ARTIST,
ATTR_MEDIA_TRACK,
ATTR_MEDIA_SERIES_TITLE,
ATTR_MEDIA_SEASON,
ATTR_MEDIA_EPISODE,
ATTR_MEDIA_CHANNEL,
ATTR_MEDIA_PLAYLIST,
ATTR_APP_ID,
ATTR_APP_NAME,
ATTR_INPUT_SOURCE,
ATTR_SOUND_MODE,
ATTR_MEDIA_SHUFFLE,
ATTR_MEDIA_REPEAT,
]
@bind_hass
def is_on(hass, entity_id=None):
entity_ids = [entity_id] if entity_id else hass.states.entity_ids(DOMAIN)
return any(
not hass.states.is_state(entity_id, STATE_OFF) for entity_id in entity_ids
)
def _rename_keys(**keys: Any) -> Callable[[dict[str, Any]], dict[str, Any]]:
def rename(value: dict[str, Any]) -> dict[str, Any]:
for to_key, from_key in keys.items():
if from_key in value:
value[to_key] = value.pop(from_key)
return value
return rename
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
component = hass.data[DOMAIN] = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL
)
websocket_api.async_register_command(hass, websocket_handle_thumbnail)
websocket_api.async_register_command(hass, websocket_browse_media)
hass.http.register_view(MediaPlayerImageView(component))
await component.async_setup(config)
component.async_register_entity_service(
SERVICE_TURN_ON, {}, "async_turn_on", [MediaPlayerEntityFeature.TURN_ON]
)
component.async_register_entity_service(
SERVICE_TURN_OFF, {}, "async_turn_off", [MediaPlayerEntityFeature.TURN_OFF]
)
component.async_register_entity_service(
SERVICE_TOGGLE,
{},
"async_toggle",
[MediaPlayerEntityFeature.TURN_OFF | MediaPlayerEntityFeature.TURN_ON],
)
component.async_register_entity_service(
SERVICE_VOLUME_UP,
{},
"async_volume_up",
[MediaPlayerEntityFeature.VOLUME_SET, MediaPlayerEntityFeature.VOLUME_STEP],
)
component.async_register_entity_service(
SERVICE_VOLUME_DOWN,
{},
"async_volume_down",
[MediaPlayerEntityFeature.VOLUME_SET, MediaPlayerEntityFeature.VOLUME_STEP],
)
component.async_register_entity_service(
SERVICE_MEDIA_PLAY_PAUSE,
{},
"async_media_play_pause",
[MediaPlayerEntityFeature.PLAY | MediaPlayerEntityFeature.PAUSE],
)
component.async_register_entity_service(
SERVICE_MEDIA_PLAY, {}, "async_media_play", [MediaPlayerEntityFeature.PLAY]
)
component.async_register_entity_service(
SERVICE_MEDIA_PAUSE, {}, "async_media_pause", [MediaPlayerEntityFeature.PAUSE]
)
component.async_register_entity_service(
SERVICE_MEDIA_STOP, {}, "async_media_stop", [MediaPlayerEntityFeature.STOP]
)
component.async_register_entity_service(
SERVICE_MEDIA_NEXT_TRACK,
{},
"async_media_next_track",
[MediaPlayerEntityFeature.NEXT_TRACK],
)
component.async_register_entity_service(
SERVICE_MEDIA_PREVIOUS_TRACK,
{},
"async_media_previous_track",
[MediaPlayerEntityFeature.PREVIOUS_TRACK],
)
component.async_register_entity_service(
SERVICE_CLEAR_PLAYLIST,
{},
"async_clear_playlist",
[MediaPlayerEntityFeature.CLEAR_PLAYLIST],
)
component.async_register_entity_service(
SERVICE_VOLUME_SET,
vol.All(
cv.make_entity_service_schema(
{vol.Required(ATTR_MEDIA_VOLUME_LEVEL): cv.small_float}
),
_rename_keys(volume=ATTR_MEDIA_VOLUME_LEVEL),
),
"async_set_volume_level",
[MediaPlayerEntityFeature.VOLUME_SET],
)
component.async_register_entity_service(
SERVICE_VOLUME_MUTE,
vol.All(
cv.make_entity_service_schema(
{vol.Required(ATTR_MEDIA_VOLUME_MUTED): cv.boolean}
),
_rename_keys(mute=ATTR_MEDIA_VOLUME_MUTED),
),
"async_mute_volume",
[MediaPlayerEntityFeature.VOLUME_MUTE],
)
component.async_register_entity_service(
SERVICE_MEDIA_SEEK,
vol.All(
cv.make_entity_service_schema(
{vol.Required(ATTR_MEDIA_SEEK_POSITION): cv.positive_float}
),
_rename_keys(position=ATTR_MEDIA_SEEK_POSITION),
),
"async_media_seek",
[MediaPlayerEntityFeature.SEEK],
)
component.async_register_entity_service(
SERVICE_JOIN,
{vol.Required(ATTR_GROUP_MEMBERS): vol.All(cv.ensure_list, [cv.entity_id])},
"async_join_players",
[MediaPlayerEntityFeature.GROUPING],
)
component.async_register_entity_service(
SERVICE_SELECT_SOURCE,
{vol.Required(ATTR_INPUT_SOURCE): cv.string},
"async_select_source",
[MediaPlayerEntityFeature.SELECT_SOURCE],
)
component.async_register_entity_service(
SERVICE_SELECT_SOUND_MODE,
{vol.Required(ATTR_SOUND_MODE): cv.string},
"async_select_sound_mode",
[MediaPlayerEntityFeature.SELECT_SOUND_MODE],
)
def _rewrite_enqueue(value):
if ATTR_MEDIA_ENQUEUE not in value:
pass
elif value[ATTR_MEDIA_ENQUEUE] is True:
value[ATTR_MEDIA_ENQUEUE] = MediaPlayerEnqueue.ADD
_LOGGER.warning(
"Playing media with enqueue set to True is deprecated. Use 'add' instead"
)
elif value[ATTR_MEDIA_ENQUEUE] is False:
value[ATTR_MEDIA_ENQUEUE] = MediaPlayerEnqueue.PLAY
_LOGGER.warning(
"Playing media with enqueue set to False is deprecated. Use 'play' instead"
)
return value
component.async_register_entity_service(
SERVICE_PLAY_MEDIA,
vol.All(
cv.make_entity_service_schema(MEDIA_PLAYER_PLAY_MEDIA_SCHEMA),
_rewrite_enqueue,
_rename_keys(
media_type=ATTR_MEDIA_CONTENT_TYPE,
media_id=ATTR_MEDIA_CONTENT_ID,
enqueue=ATTR_MEDIA_ENQUEUE,
),
),
"async_play_media",
[MediaPlayerEntityFeature.PLAY_MEDIA],
)
component.async_register_entity_service(
SERVICE_SHUFFLE_SET,
{vol.Required(ATTR_MEDIA_SHUFFLE): cv.boolean},
"async_set_shuffle",
[MediaPlayerEntityFeature.SHUFFLE_SET],
)
component.async_register_entity_service(
SERVICE_UNJOIN, {}, "async_unjoin_player", [MediaPlayerEntityFeature.GROUPING]
)
component.async_register_entity_service(
SERVICE_REPEAT_SET,
{vol.Required(ATTR_MEDIA_REPEAT): vol.In(REPEAT_MODES)},
"async_set_repeat",
[MediaPlayerEntityFeature.REPEAT_SET],
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
component: EntityComponent = hass.data[DOMAIN]
return await component.async_setup_entry(entry)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
component: EntityComponent = hass.data[DOMAIN]
return await component.async_unload_entry(entry)
@dataclass
class MediaPlayerEntityDescription(EntityDescription):
device_class: MediaPlayerDeviceClass | str | None = None
class MediaPlayerEntity(Entity):
entity_description: MediaPlayerEntityDescription
_access_token: str | None = None
_attr_app_id: str | None = None
_attr_app_name: str | None = None
_attr_device_class: MediaPlayerDeviceClass | str | None
_attr_group_members: list[str] | None = None
_attr_is_volume_muted: bool | None = None
_attr_media_album_artist: str | None = None
_attr_media_album_name: str | None = None
_attr_media_artist: str | None = None
_attr_media_channel: str | None = None
_attr_media_content_id: str | None = None
_attr_media_content_type: str | None = None
_attr_media_duration: int | None = None
_attr_media_episode: str | None = None
_attr_media_image_hash: str | None
_attr_media_image_remotely_accessible: bool = False
_attr_media_image_url: str | None = None
_attr_media_playlist: str | None = None
_attr_media_position_updated_at: dt.datetime | None = None
_attr_media_position: int | None = None
_attr_media_season: str | None = None
_attr_media_series_title: str | None = None
_attr_media_title: str | None = None
_attr_media_track: int | None = None
_attr_repeat: str | None = None
_attr_shuffle: bool | None = None
_attr_sound_mode_list: list[str] | None = None
_attr_sound_mode: str | None = None
_attr_source_list: list[str] | None = None
_attr_source: str | None = None
_attr_state: str | None = None
_attr_supported_features: int = 0
_attr_volume_level: float | None = None
@property
def device_class(self) -> MediaPlayerDeviceClass | str | None:
if hasattr(self, "_attr_device_class"):
return self._attr_device_class
if hasattr(self, "entity_description"):
return self.entity_description.device_class
return None
@property
def state(self) -> str | None:
return self._attr_state
@property
def access_token(self) -> str:
if self._access_token is None:
self._access_token = secrets.token_hex(32)
return self._access_token
@property
def volume_level(self) -> float | None:
return self._attr_volume_level
@property
def is_volume_muted(self) -> bool | None:
return self._attr_is_volume_muted
@property
def media_content_id(self) -> str | None:
return self._attr_media_content_id
@property
def media_content_type(self) -> str | None:
return self._attr_media_content_type
@property
def media_duration(self) -> int | None:
return self._attr_media_duration
@property
def media_position(self) -> int | None:
return self._attr_media_position
@property
def media_position_updated_at(self) -> dt.datetime | None:
return self._attr_media_position_updated_at
@property
def media_image_url(self) -> str | None:
return self._attr_media_image_url
@property
def media_image_remotely_accessible(self) -> bool:
return self._attr_media_image_remotely_accessible
@property
def media_image_hash(self) -> str | None:
if hasattr(self, "_attr_media_image_hash"):
return self._attr_media_image_hash
if (url := self.media_image_url) is not None:
return hashlib.sha256(url.encode("utf-8")).hexdigest()[:16]
return None
async def async_get_media_image(self) -> tuple[bytes | None, str | None]:
if (url := self.media_image_url) is None:
return None, None
return await self._async_fetch_image_from_cache(url)
async def async_get_browse_image(
self,
media_content_type: str,
media_content_id: str,
media_image_id: str | None = None,
) -> tuple[bytes | None, str | None]:
return None, None
@property
def media_title(self) -> str | None:
return self._attr_media_title
@property
def media_artist(self) -> str | None:
return self._attr_media_artist
@property
def media_album_name(self) -> str | None:
return self._attr_media_album_name
@property
def media_album_artist(self) -> str | None:
return self._attr_media_album_artist
@property
def media_track(self) -> int | None:
return self._attr_media_track
@property
def media_series_title(self) -> str | None:
return self._attr_media_series_title
@property
def media_season(self) -> str | None:
return self._attr_media_season
@property
def media_episode(self) -> str | None:
return self._attr_media_episode
@property
def media_channel(self) -> str | None:
return self._attr_media_channel
@property
def media_playlist(self) -> str | None:
return self._attr_media_playlist
@property
def app_id(self) -> str | None:
return self._attr_app_id
@property
def app_name(self) -> str | None:
return self._attr_app_name
@property
def source(self) -> str | None:
return self._attr_source
@property
def source_list(self) -> list[str] | None:
return self._attr_source_list
@property
def sound_mode(self) -> str | None:
return self._attr_sound_mode
@property
def sound_mode_list(self) -> list[str] | None:
return self._attr_sound_mode_list
@property
def shuffle(self) -> bool | None:
return self._attr_shuffle
@property
def repeat(self) -> str | None:
return self._attr_repeat
@property
def group_members(self) -> list[str] | None:
return self._attr_group_members
@property
def supported_features(self) -> int:
return self._attr_supported_features
def turn_on(self):
raise NotImplementedError()
async def async_turn_on(self):
await self.hass.async_add_executor_job(self.turn_on)
def turn_off(self):
raise NotImplementedError()
async def async_turn_off(self):
await self.hass.async_add_executor_job(self.turn_off)
def mute_volume(self, mute):
raise NotImplementedError()
async def async_mute_volume(self, mute):
await self.hass.async_add_executor_job(self.mute_volume, mute)
def set_volume_level(self, volume):
raise NotImplementedError()
async def async_set_volume_level(self, volume):
await self.hass.async_add_executor_job(self.set_volume_level, volume)
def media_play(self):
raise NotImplementedError()
async def async_media_play(self):
await self.hass.async_add_executor_job(self.media_play)
def media_pause(self):
raise NotImplementedError()
async def async_media_pause(self):
await self.hass.async_add_executor_job(self.media_pause)
def media_stop(self):
raise NotImplementedError()
async def async_media_stop(self):
await self.hass.async_add_executor_job(self.media_stop)
def media_previous_track(self):
raise NotImplementedError()
async def async_media_previous_track(self):
await self.hass.async_add_executor_job(self.media_previous_track)
def media_next_track(self):
raise NotImplementedError()
async def async_media_next_track(self):
await self.hass.async_add_executor_job(self.media_next_track)
def media_seek(self, position):
raise NotImplementedError()
async def async_media_seek(self, position):
await self.hass.async_add_executor_job(self.media_seek, position)
def play_media(self, media_type, media_id, **kwargs):
raise NotImplementedError()
async def async_play_media(self, media_type, media_id, **kwargs):
await self.hass.async_add_executor_job(
ft.partial(self.play_media, media_type, media_id, **kwargs)
)
def select_source(self, source):
raise NotImplementedError()
async def async_select_source(self, source):
await self.hass.async_add_executor_job(self.select_source, source)
def select_sound_mode(self, sound_mode):
raise NotImplementedError()
async def async_select_sound_mode(self, sound_mode):
await self.hass.async_add_executor_job(self.select_sound_mode, sound_mode)
def clear_playlist(self):
raise NotImplementedError()
async def async_clear_playlist(self):
await self.hass.async_add_executor_job(self.clear_playlist)
def set_shuffle(self, shuffle):
raise NotImplementedError()
async def async_set_shuffle(self, shuffle):
await self.hass.async_add_executor_job(self.set_shuffle, shuffle)
def set_repeat(self, repeat):
raise NotImplementedError()
async def async_set_repeat(self, repeat):
await self.hass.async_add_executor_job(self.set_repeat, repeat)
@property
def support_play(self):
return bool(self.supported_features & MediaPlayerEntityFeature.PLAY)
@property
def support_pause(self):
return bool(self.supported_features & MediaPlayerEntityFeature.PAUSE)
@property
def support_stop(self):
return bool(self.supported_features & MediaPlayerEntityFeature.STOP)
@property
def support_seek(self):
return bool(self.supported_features & MediaPlayerEntityFeature.SEEK)
@property
def support_volume_set(self):
return bool(self.supported_features & MediaPlayerEntityFeature.VOLUME_SET)
@property
def support_volume_mute(self):
return bool(self.supported_features & MediaPlayerEntityFeature.VOLUME_MUTE)
@property
def support_previous_track(self):
return bool(self.supported_features & MediaPlayerEntityFeature.PREVIOUS_TRACK)
@property
def support_next_track(self):
return bool(self.supported_features & MediaPlayerEntityFeature.NEXT_TRACK)
@property
def support_play_media(self):
return bool(self.supported_features & MediaPlayerEntityFeature.PLAY_MEDIA)
@property
def support_select_source(self):
return bool(self.supported_features & MediaPlayerEntityFeature.SELECT_SOURCE)
@property
def support_select_sound_mode(self):
return bool(
self.supported_features & MediaPlayerEntityFeature.SELECT_SOUND_MODE
)
@property
def support_clear_playlist(self):
return bool(self.supported_features & MediaPlayerEntityFeature.CLEAR_PLAYLIST)
@property
def support_shuffle_set(self):
return bool(self.supported_features & MediaPlayerEntityFeature.SHUFFLE_SET)
@property
def support_grouping(self):
return bool(self.supported_features & MediaPlayerEntityFeature.GROUPING)
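    # Note: the support_* helpers above simply test single bits of the
    # MediaPlayerEntityFeature flag.  A caller can make the same check
    # directly (sketch with a hypothetical `entity` variable, not part of
    # this module):
    #   if entity.supported_features & MediaPlayerEntityFeature.SEEK:
    #       await entity.async_media_seek(42.0)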
async def async_toggle(self):
if hasattr(self, "toggle"):
await self.hass.async_add_executor_job(self.toggle)
return
if self.state in (STATE_OFF, STATE_IDLE):
await self.async_turn_on()
else:
await self.async_turn_off()
async def async_volume_up(self):
if hasattr(self, "volume_up"):
await self.hass.async_add_executor_job(self.volume_up)
return
if (
self.volume_level < 1
and self.supported_features & MediaPlayerEntityFeature.VOLUME_SET
):
await self.async_set_volume_level(min(1, self.volume_level + 0.1))
async def async_volume_down(self):
if hasattr(self, "volume_down"):
await self.hass.async_add_executor_job(self.volume_down)
return
if (
self.volume_level > 0
and self.supported_features & MediaPlayerEntityFeature.VOLUME_SET
):
await self.async_set_volume_level(max(0, self.volume_level - 0.1))
async def async_media_play_pause(self):
if hasattr(self, "media_play_pause"):
await self.hass.async_add_executor_job(self.media_play_pause)
return
if self.state == STATE_PLAYING:
await self.async_media_pause()
else:
await self.async_media_play()
@property
def entity_picture(self):
if self.state == STATE_OFF:
return None
if self.media_image_remotely_accessible:
return self.media_image_url
return self.media_image_local
@property
def media_image_local(self):
if (image_hash := self.media_image_hash) is None:
return None
return (
f"/api/media_player_proxy/{self.entity_id}?"
f"token={self.access_token}&cache={image_hash}"
)
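    # The proxied URL assembled above has the shape (illustrative values only):
    #   /api/media_player_proxy/media_player.living_room?token=...&cache=0123456789abcdef
    # and is served by MediaPlayerImageView defined later in this module.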
@property
def capability_attributes(self):
supported_features = self.supported_features or 0
data = {}
if supported_features & MediaPlayerEntityFeature.SELECT_SOURCE and (
source_list := self.source_list
):
data[ATTR_INPUT_SOURCE_LIST] = source_list
if supported_features & MediaPlayerEntityFeature.SELECT_SOUND_MODE and (
sound_mode_list := self.sound_mode_list
):
data[ATTR_SOUND_MODE_LIST] = sound_mode_list
return data
@final
@property
def state_attributes(self):
state_attr = {}
if self.support_grouping:
state_attr[ATTR_GROUP_MEMBERS] = self.group_members
if self.state == STATE_OFF:
return state_attr
for attr in ATTR_TO_PROPERTY:
if (value := getattr(self, attr)) is not None:
state_attr[attr] = value
if self.media_image_remotely_accessible:
state_attr[ATTR_ENTITY_PICTURE_LOCAL] = self.media_image_local
return state_attr
async def async_browse_media(
self,
media_content_type: str | None = None,
media_content_id: str | None = None,
) -> BrowseMedia:
raise NotImplementedError()
def join_players(self, group_members):
raise NotImplementedError()
async def async_join_players(self, group_members):
await self.hass.async_add_executor_job(self.join_players, group_members)
def unjoin_player(self):
raise NotImplementedError()
async def async_unjoin_player(self):
await self.hass.async_add_executor_job(self.unjoin_player)
async def _async_fetch_image_from_cache(
self, url: str
) -> tuple[bytes | None, str | None]:
cache_images = cast(collections.OrderedDict, ENTITY_IMAGE_CACHE[CACHE_IMAGES])
cache_maxsize = cast(int, ENTITY_IMAGE_CACHE[CACHE_MAXSIZE])
if urlparse(url).hostname is None:
url = f"{get_url(self.hass)}{url}"
if url not in cache_images:
cache_images[url] = {CACHE_LOCK: asyncio.Lock()}
async with cache_images[url][CACHE_LOCK]:
if CACHE_CONTENT in cache_images[url]:
return cache_images[url][CACHE_CONTENT]
(content, content_type) = await self._async_fetch_image(url)
async with cache_images[url][CACHE_LOCK]:
cache_images[url][CACHE_CONTENT] = content, content_type
while len(cache_images) > cache_maxsize:
cache_images.popitem(last=False)
return content, content_type
async def _async_fetch_image(self, url: str) -> tuple[bytes | None, str | None]:
return await async_fetch_image(_LOGGER, self.hass, url)
def get_browse_image_url(
self,
media_content_type: str,
media_content_id: str,
media_image_id: str | None = None,
) -> str:
url_path = (
f"/api/media_player_proxy/{self.entity_id}/browse_media"
f"/{media_content_type}/{media_content_id}"
)
url_query = {"token": self.access_token}
if media_image_id:
url_query["media_image_id"] = media_image_id
return str(URL(url_path).with_query(url_query))
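    # Minimal illustrative subclass (a sketch, not part of this module) showing
    # how the _attr_* attributes feed the read-only properties above:
    #   class DemoPlayer(MediaPlayerEntity):
    #       _attr_media_title = "Demo track"
    #       _attr_supported_features = (
    #           MediaPlayerEntityFeature.PLAY | MediaPlayerEntityFeature.PAUSE
    #       )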
class MediaPlayerImageView(HomeAssistantView):
requires_auth = False
url = "/api/media_player_proxy/{entity_id}"
name = "api:media_player:image"
extra_urls = [
url + "/browse_media/{media_content_type}/{media_content_id}",
]
def __init__(self, component: EntityComponent) -> None:
self.component = component
async def get(
self,
request: web.Request,
entity_id: str,
media_content_type: str | None = None,
media_content_id: str | None = None,
) -> web.Response:
if (player := self.component.get_entity(entity_id)) is None:
status = (
HTTPStatus.NOT_FOUND
if request[KEY_AUTHENTICATED]
else HTTPStatus.UNAUTHORIZED
)
return web.Response(status=status)
assert isinstance(player, MediaPlayerEntity)
authenticated = (
request[KEY_AUTHENTICATED]
or request.query.get("token") == player.access_token
)
if not authenticated:
return web.Response(status=HTTPStatus.UNAUTHORIZED)
if media_content_type and media_content_id:
media_image_id = request.query.get("media_image_id")
data, content_type = await player.async_get_browse_image(
media_content_type, media_content_id, media_image_id
)
else:
data, content_type = await player.async_get_media_image()
if data is None:
return web.Response(status=HTTPStatus.INTERNAL_SERVER_ERROR)
headers: LooseHeaders = {CACHE_CONTROL: "max-age=3600"}
return web.Response(body=data, content_type=content_type, headers=headers)
@websocket_api.websocket_command(
{
vol.Required("type"): "media_player_thumbnail",
vol.Required("entity_id"): cv.entity_id,
}
)
@websocket_api.async_response
async def websocket_handle_thumbnail(hass, connection, msg):
component = hass.data[DOMAIN]
if (player := component.get_entity(msg["entity_id"])) is None:
connection.send_message(
websocket_api.error_message(msg["id"], ERR_NOT_FOUND, "Entity not found")
)
return
_LOGGER.warning(
"The websocket command media_player_thumbnail is deprecated. Use /api/media_player_proxy instead"
)
data, content_type = await player.async_get_media_image()
if data is None:
connection.send_message(
websocket_api.error_message(
msg["id"], "thumbnail_fetch_failed", "Failed to fetch thumbnail"
)
)
return
await connection.send_big_result(
msg["id"],
{
"content_type": content_type,
"content": base64.b64encode(data).decode("utf-8"),
},
)
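    # Illustrative client frame handled by this (deprecated) command; the
    # entity id is hypothetical:
    #   {"id": 5, "type": "media_player_thumbnail", "entity_id": "media_player.tv"}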
@websocket_api.websocket_command(
{
vol.Required("type"): "media_player/browse_media",
vol.Required("entity_id"): cv.entity_id,
vol.Inclusive(
ATTR_MEDIA_CONTENT_TYPE,
"media_ids",
"media_content_type and media_content_id must be provided together",
): str,
vol.Inclusive(
ATTR_MEDIA_CONTENT_ID,
"media_ids",
"media_content_type and media_content_id must be provided together",
): str,
}
)
@websocket_api.async_response
async def websocket_browse_media(hass, connection, msg):
component = hass.data[DOMAIN]
player: MediaPlayerEntity | None = component.get_entity(msg["entity_id"])
if player is None:
connection.send_error(msg["id"], "entity_not_found", "Entity not found")
return
if not player.supported_features & MediaPlayerEntityFeature.BROWSE_MEDIA:
connection.send_message(
websocket_api.error_message(
msg["id"], ERR_NOT_SUPPORTED, "Player does not support browsing media"
)
)
return
media_content_type = msg.get(ATTR_MEDIA_CONTENT_TYPE)
media_content_id = msg.get(ATTR_MEDIA_CONTENT_ID)
try:
payload = await player.async_browse_media(media_content_type, media_content_id)
except NotImplementedError:
_LOGGER.error(
"%s allows media browsing but its integration (%s) does not",
player.entity_id,
player.platform.platform_name,
)
connection.send_message(
websocket_api.error_message(
msg["id"],
ERR_NOT_SUPPORTED,
"Integration does not support browsing media",
)
)
return
except BrowseError as err:
connection.send_message(
websocket_api.error_message(msg["id"], ERR_UNKNOWN_ERROR, str(err))
)
return
if isinstance(payload, BrowseMedia):
payload = payload.as_dict()
else:
_LOGGER.warning("Browse Media should use new BrowseMedia class")
connection.send_result(msg["id"], payload)
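    # Illustrative client frame for this command (hypothetical values); the two
    # media_content_* keys must be supplied together or omitted together, as the
    # vol.Inclusive schema above enforces:
    #   {"id": 7, "type": "media_player/browse_media",
    #    "entity_id": "media_player.tv",
    #    "media_content_type": "album", "media_content_id": "abc123"}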
async def async_fetch_image(
logger: logging.Logger, hass: HomeAssistant, url: str
) -> tuple[bytes | None, str | None]:
content, content_type = (None, None)
websession = async_get_clientsession(hass)
with suppress(asyncio.TimeoutError), async_timeout.timeout(10):
response = await websession.get(url)
if response.status == HTTPStatus.OK:
content = await response.read()
if content_type := response.headers.get(CONTENT_TYPE):
content_type = content_type.split(";")[0]
if content is None:
url_parts = URL(url)
if url_parts.user is not None:
url_parts = url_parts.with_user("xxxx")
if url_parts.password is not None:
url_parts = url_parts.with_password("xxxxxxxx")
url = str(url_parts)
logger.warning("Error retrieving proxied image from %s", url)
return content, content_type
| true | true |
f71f41dc2939024b8e868c9cd42129d682fd9c29 | 7,083 | py | Python | homeassistant/components/aquostv/media_player.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 23 | 2017-11-15T21:03:53.000Z | 2021-03-29T21:33:48.000Z | homeassistant/components/aquostv/media_player.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 9 | 2022-01-27T06:32:10.000Z | 2022-03-31T07:07:51.000Z | homeassistant/components/aquostv/media_player.py | itewk/home-assistant | 769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4 | [
"Apache-2.0"
] | 10 | 2018-01-01T00:12:51.000Z | 2021-12-21T23:08:05.000Z |
"""Support for interface with an Aquos TV."""
import logging
import sharp_aquos_rc
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerDevice
from homeassistant.components.media_player.const import (
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_TIMEOUT,
CONF_USERNAME,
STATE_OFF,
STATE_ON,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Sharp Aquos TV"
DEFAULT_PORT = 10002
DEFAULT_USERNAME = "admin"
DEFAULT_PASSWORD = "password"
DEFAULT_TIMEOUT = 0.5
DEFAULT_RETRIES = 2
SUPPORT_SHARPTV = (
SUPPORT_TURN_OFF
| SUPPORT_NEXT_TRACK
| SUPPORT_PAUSE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_SELECT_SOURCE
| SUPPORT_VOLUME_MUTE
| SUPPORT_VOLUME_STEP
| SUPPORT_VOLUME_SET
| SUPPORT_PLAY
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.string,
vol.Optional("retries", default=DEFAULT_RETRIES): cv.string,
vol.Optional("power_on_enabled", default=False): cv.boolean,
}
)
SOURCES = {
0: "TV / Antenna",
1: "HDMI_IN_1",
2: "HDMI_IN_2",
3: "HDMI_IN_3",
4: "HDMI_IN_4",
5: "COMPONENT IN",
6: "VIDEO_IN_1",
7: "VIDEO_IN_2",
8: "PC_IN",
}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Sharp Aquos TV platform."""
name = config.get(CONF_NAME)
port = config.get(CONF_PORT)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
power_on_enabled = config.get("power_on_enabled")
if discovery_info:
_LOGGER.debug("%s", discovery_info)
vals = discovery_info.split(":")
if len(vals) > 1:
port = vals[1]
host = vals[0]
remote = sharp_aquos_rc.TV(host, port, username, password, timeout=20)
add_entities([SharpAquosTVDevice(name, remote, power_on_enabled)])
return True
host = config.get(CONF_HOST)
remote = sharp_aquos_rc.TV(host, port, username, password, 15, 1)
add_entities([SharpAquosTVDevice(name, remote, power_on_enabled)])
return True
def _retry(func):
"""Handle query retries."""
def wrapper(obj, *args, **kwargs):
"""Wrap all query functions."""
update_retries = 5
while update_retries > 0:
try:
func(obj, *args, **kwargs)
break
except (OSError, TypeError, ValueError):
update_retries -= 1
if update_retries == 0:
obj.set_state(STATE_OFF)
return wrapper
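# The decorator above wraps the update/command methods below, e.g.:
#   @_retry
#   def update(self): ...
# so a transient OSError/TypeError/ValueError is re-attempted (up to five tries
# in total) before the entity state falls back to STATE_OFF.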
class SharpAquosTVDevice(MediaPlayerDevice):
"""Representation of a Aquos TV."""
def __init__(self, name, remote, power_on_enabled=False):
"""Initialize the aquos device."""
global SUPPORT_SHARPTV
self._power_on_enabled = power_on_enabled
if self._power_on_enabled:
SUPPORT_SHARPTV = SUPPORT_SHARPTV | SUPPORT_TURN_ON
# Save a reference to the imported class
self._name = name
# Assume that the TV is not muted
self._muted = False
self._state = None
self._remote = remote
self._volume = 0
self._source = None
self._source_list = list(SOURCES.values())
def set_state(self, state):
"""Set TV state."""
self._state = state
@_retry
def update(self):
"""Retrieve the latest data."""
if self._remote.power() == 1:
self._state = STATE_ON
else:
self._state = STATE_OFF
# Set TV to be able to remotely power on
if self._power_on_enabled:
self._remote.power_on_command_settings(2)
else:
self._remote.power_on_command_settings(0)
# Get mute state
if self._remote.mute() == 2:
self._muted = False
else:
self._muted = True
# Get source
self._source = SOURCES.get(self._remote.input())
# Get volume
self._volume = self._remote.volume() / 60
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def source(self):
"""Return the current source."""
return self._source
@property
def source_list(self):
"""Return the source list."""
return self._source_list
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_SHARPTV
@_retry
def turn_off(self):
"""Turn off tvplayer."""
self._remote.power(0)
@_retry
def volume_up(self):
"""Volume up the media player."""
self._remote.volume(int(self._volume * 60) + 2)
@_retry
def volume_down(self):
"""Volume down media player."""
self._remote.volume(int(self._volume * 60) - 2)
@_retry
def set_volume_level(self, volume):
"""Set Volume media player."""
self._remote.volume(int(volume * 60))
@_retry
def mute_volume(self, mute):
"""Send mute command."""
self._remote.mute(0)
@_retry
def turn_on(self):
"""Turn the media player on."""
self._remote.power(1)
@_retry
def media_play_pause(self):
"""Simulate play pause media player."""
self._remote.remote_button(40)
@_retry
def media_play(self):
"""Send play command."""
self._remote.remote_button(16)
@_retry
def media_pause(self):
"""Send pause command."""
self._remote.remote_button(16)
@_retry
def media_next_track(self):
"""Send next track command."""
self._remote.remote_button(21)
@_retry
def media_previous_track(self):
"""Send the previous track command."""
self._remote.remote_button(19)
def select_source(self, source):
"""Set the input source."""
for key, value in SOURCES.items():
if source == value:
self._remote.input(key)
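# Example configuration.yaml entry for this platform (host and name are
# illustrative; the remaining keys fall back to the PLATFORM_SCHEMA defaults):
#
#   media_player:
#     - platform: aquostv
#       host: 192.168.1.2
#       name: Living room TV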
| 26.829545 | 84 | 0.624876 |
import logging
import sharp_aquos_rc
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerDevice
from homeassistant.components.media_player.const import (
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_TIMEOUT,
CONF_USERNAME,
STATE_OFF,
STATE_ON,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Sharp Aquos TV"
DEFAULT_PORT = 10002
DEFAULT_USERNAME = "admin"
DEFAULT_PASSWORD = "password"
DEFAULT_TIMEOUT = 0.5
DEFAULT_RETRIES = 2
SUPPORT_SHARPTV = (
SUPPORT_TURN_OFF
| SUPPORT_NEXT_TRACK
| SUPPORT_PAUSE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_SELECT_SOURCE
| SUPPORT_VOLUME_MUTE
| SUPPORT_VOLUME_STEP
| SUPPORT_VOLUME_SET
| SUPPORT_PLAY
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.string,
vol.Optional("retries", default=DEFAULT_RETRIES): cv.string,
vol.Optional("power_on_enabled", default=False): cv.boolean,
}
)
SOURCES = {
0: "TV / Antenna",
1: "HDMI_IN_1",
2: "HDMI_IN_2",
3: "HDMI_IN_3",
4: "HDMI_IN_4",
5: "COMPONENT IN",
6: "VIDEO_IN_1",
7: "VIDEO_IN_2",
8: "PC_IN",
}
def setup_platform(hass, config, add_entities, discovery_info=None):
name = config.get(CONF_NAME)
port = config.get(CONF_PORT)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
power_on_enabled = config.get("power_on_enabled")
if discovery_info:
_LOGGER.debug("%s", discovery_info)
vals = discovery_info.split(":")
if len(vals) > 1:
port = vals[1]
host = vals[0]
remote = sharp_aquos_rc.TV(host, port, username, password, timeout=20)
add_entities([SharpAquosTVDevice(name, remote, power_on_enabled)])
return True
host = config.get(CONF_HOST)
remote = sharp_aquos_rc.TV(host, port, username, password, 15, 1)
add_entities([SharpAquosTVDevice(name, remote, power_on_enabled)])
return True
def _retry(func):
def wrapper(obj, *args, **kwargs):
update_retries = 5
while update_retries > 0:
try:
func(obj, *args, **kwargs)
break
except (OSError, TypeError, ValueError):
update_retries -= 1
if update_retries == 0:
obj.set_state(STATE_OFF)
return wrapper
class SharpAquosTVDevice(MediaPlayerDevice):
def __init__(self, name, remote, power_on_enabled=False):
global SUPPORT_SHARPTV
self._power_on_enabled = power_on_enabled
if self._power_on_enabled:
SUPPORT_SHARPTV = SUPPORT_SHARPTV | SUPPORT_TURN_ON
self._name = name
self._muted = False
self._state = None
self._remote = remote
self._volume = 0
self._source = None
self._source_list = list(SOURCES.values())
def set_state(self, state):
self._state = state
@_retry
def update(self):
if self._remote.power() == 1:
self._state = STATE_ON
else:
self._state = STATE_OFF
if self._power_on_enabled:
self._remote.power_on_command_settings(2)
else:
self._remote.power_on_command_settings(0)
if self._remote.mute() == 2:
self._muted = False
else:
self._muted = True
self._source = SOURCES.get(self._remote.input())
self._volume = self._remote.volume() / 60
@property
def name(self):
return self._name
@property
def state(self):
return self._state
@property
def source(self):
return self._source
@property
def source_list(self):
return self._source_list
@property
def volume_level(self):
return self._volume
@property
def is_volume_muted(self):
return self._muted
@property
def supported_features(self):
return SUPPORT_SHARPTV
@_retry
def turn_off(self):
self._remote.power(0)
@_retry
def volume_up(self):
self._remote.volume(int(self._volume * 60) + 2)
@_retry
def volume_down(self):
self._remote.volume(int(self._volume * 60) - 2)
@_retry
def set_volume_level(self, volume):
self._remote.volume(int(volume * 60))
@_retry
def mute_volume(self, mute):
self._remote.mute(0)
@_retry
def turn_on(self):
self._remote.power(1)
@_retry
def media_play_pause(self):
self._remote.remote_button(40)
@_retry
def media_play(self):
self._remote.remote_button(16)
@_retry
def media_pause(self):
self._remote.remote_button(16)
@_retry
def media_next_track(self):
self._remote.remote_button(21)
@_retry
def media_previous_track(self):
self._remote.remote_button(19)
def select_source(self, source):
for key, value in SOURCES.items():
if source == value:
self._remote.input(key)
| true | true |
f71f420d01f47ee2aae3767b5e08211606d22d13 | 10,364 | py | Python | lib/python3.8/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_webfilter_ftgd_local_cat.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | lib/python3.8/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_webfilter_ftgd_local_cat.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null | lib/python3.8/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_webfilter_ftgd_local_cat.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | null | null | null |
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_webfilter_ftgd_local_cat
short_description: Configure FortiGuard Web Filter local categories in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify webfilter feature and ftgd_local_cat category.
      Examples include all parameters, and values need to be adjusted to data sources before usage.
Tested with FOS v6.0.0
version_added: "2.8"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Hongbin Lu (@fgtdev-hblu)
- Frank Shen (@frankshen01)
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks
requirements:
- ansible>=2.9.0
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
webfilter_ftgd_local_cat:
description:
- Configure FortiGuard Web Filter local categories.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
desc:
description:
- Local category description.
required: true
type: str
id:
description:
- Local category ID.
type: int
status:
description:
- Enable/disable the local category.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: fortigates
collections:
- fortinet.fortios
connection: httpapi
vars:
vdom: "root"
ansible_httpapi_use_ssl: yes
ansible_httpapi_validate_certs: no
ansible_httpapi_port: 443
tasks:
- name: Configure FortiGuard Web Filter local categories.
fortios_webfilter_ftgd_local_cat:
vdom: "{{ vdom }}"
state: "present"
access_token: "<your_own_value>"
webfilter_ftgd_local_cat:
desc: "<your_own_value>"
id: "4"
status: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
def filter_webfilter_ftgd_local_cat_data(json):
option_list = ['desc', 'id', 'status']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
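# Illustrative behaviour of the helper above (hypothetical input): only keys
# from option_list with a non-None value survive, everything else is dropped.
#   filter_webfilter_ftgd_local_cat_data({"desc": "x", "id": None, "vdom": "root"})
#   -> {"desc": "x"}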
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
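# Illustrative behaviour (hypothetical input); the conversion recurses through
# nested dicts and lists:
#   underscore_to_hyphen({"power_on": {"sub_key": 1}}) -> {"power-on": {"sub-key": 1}}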
def webfilter_ftgd_local_cat(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
elif 'state' in data['webfilter_ftgd_local_cat'] and data['webfilter_ftgd_local_cat']['state']:
state = data['webfilter_ftgd_local_cat']['state']
else:
state = True
webfilter_ftgd_local_cat_data = data['webfilter_ftgd_local_cat']
filtered_data = underscore_to_hyphen(filter_webfilter_ftgd_local_cat_data(webfilter_ftgd_local_cat_data))
if state == "present":
return fos.set('webfilter',
'ftgd-local-cat',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('webfilter',
'ftgd-local-cat',
mkey=filtered_data['desc'],
vdom=vdom)
else:
fos._module.fail_json(msg='state must be present or absent!')
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_webfilter(data, fos):
if data['webfilter_ftgd_local_cat']:
resp = webfilter_ftgd_local_cat(data, fos)
else:
fos._module.fail_json(msg='missing task body: %s' % ('webfilter_ftgd_local_cat'))
return not is_successful_status(resp), \
resp['status'] == "success" and \
(resp['revision_changed'] if 'revision_changed' in resp else True), \
resp
def main():
mkeyname = 'desc'
fields = {
"access_token": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"webfilter_ftgd_local_cat": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"desc": {"required": True, "type": "str"},
"id": {"required": False, "type": "int"},
"status": {"required": False, "type": "str",
"choices": ["enable",
"disable"]}
}
}
}
check_legacy_fortiosapi()
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
versions_check_result = None
if module._socket_path:
connection = Connection(module._socket_path)
if 'access_token' in module.params:
connection.set_option('access_token', module.params['access_token'])
fos = FortiOSHandler(connection, module, mkeyname)
is_error, has_changed, result = fortios_webfilter(module.params, fos)
versions_check_result = connection.get_system_version()
else:
module.fail_json(**FAIL_SOCKET_MSG)
if versions_check_result and versions_check_result['matched'] is False:
module.warn("Ansible has detected version mismatch between FortOS system and galaxy, see more details by specifying option -vvv")
if not is_error:
if versions_check_result and versions_check_result['matched'] is False:
module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result)
else:
module.exit_json(changed=has_changed, meta=result)
else:
if versions_check_result and versions_check_result['matched'] is False:
module.fail_json(msg="Error in repo", version_check_warning=versions_check_result, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| 32.3875 | 137 | 0.631513 |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_webfilter_ftgd_local_cat
short_description: Configure FortiGuard Web Filter local categories in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify webfilter feature and ftgd_local_cat category.
      Examples include all parameters, and values need to be adjusted to data sources before usage.
Tested with FOS v6.0.0
version_added: "2.8"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Hongbin Lu (@fgtdev-hblu)
- Frank Shen (@frankshen01)
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks
requirements:
- ansible>=2.9.0
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
webfilter_ftgd_local_cat:
description:
- Configure FortiGuard Web Filter local categories.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
desc:
description:
- Local category description.
required: true
type: str
id:
description:
- Local category ID.
type: int
status:
description:
- Enable/disable the local category.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: fortigates
collections:
- fortinet.fortios
connection: httpapi
vars:
vdom: "root"
ansible_httpapi_use_ssl: yes
ansible_httpapi_validate_certs: no
ansible_httpapi_port: 443
tasks:
- name: Configure FortiGuard Web Filter local categories.
fortios_webfilter_ftgd_local_cat:
vdom: "{{ vdom }}"
state: "present"
access_token: "<your_own_value>"
webfilter_ftgd_local_cat:
desc: "<your_own_value>"
id: "4"
status: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
def filter_webfilter_ftgd_local_cat_data(json):
option_list = ['desc', 'id', 'status']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def webfilter_ftgd_local_cat(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
elif 'state' in data['webfilter_ftgd_local_cat'] and data['webfilter_ftgd_local_cat']['state']:
state = data['webfilter_ftgd_local_cat']['state']
else:
state = True
webfilter_ftgd_local_cat_data = data['webfilter_ftgd_local_cat']
filtered_data = underscore_to_hyphen(filter_webfilter_ftgd_local_cat_data(webfilter_ftgd_local_cat_data))
if state == "present":
return fos.set('webfilter',
'ftgd-local-cat',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('webfilter',
'ftgd-local-cat',
mkey=filtered_data['desc'],
vdom=vdom)
else:
fos._module.fail_json(msg='state must be present or absent!')
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_webfilter(data, fos):
if data['webfilter_ftgd_local_cat']:
resp = webfilter_ftgd_local_cat(data, fos)
else:
fos._module.fail_json(msg='missing task body: %s' % ('webfilter_ftgd_local_cat'))
return not is_successful_status(resp), \
resp['status'] == "success" and \
(resp['revision_changed'] if 'revision_changed' in resp else True), \
resp
def main():
mkeyname = 'desc'
fields = {
"access_token": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"webfilter_ftgd_local_cat": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"desc": {"required": True, "type": "str"},
"id": {"required": False, "type": "int"},
"status": {"required": False, "type": "str",
"choices": ["enable",
"disable"]}
}
}
}
check_legacy_fortiosapi()
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
versions_check_result = None
if module._socket_path:
connection = Connection(module._socket_path)
if 'access_token' in module.params:
connection.set_option('access_token', module.params['access_token'])
fos = FortiOSHandler(connection, module, mkeyname)
is_error, has_changed, result = fortios_webfilter(module.params, fos)
versions_check_result = connection.get_system_version()
else:
module.fail_json(**FAIL_SOCKET_MSG)
if versions_check_result and versions_check_result['matched'] is False:
module.warn("Ansible has detected version mismatch between FortOS system and galaxy, see more details by specifying option -vvv")
if not is_error:
if versions_check_result and versions_check_result['matched'] is False:
module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result)
else:
module.exit_json(changed=has_changed, meta=result)
else:
if versions_check_result and versions_check_result['matched'] is False:
module.fail_json(msg="Error in repo", version_check_warning=versions_check_result, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| true | true |
f71f426681d3d3e22fb9c11f435ae1635177faa0 | 135 | py | Python | helloworld_core/__init__.py | datadistillr/jupyter_integration_base | 76202e63208593d411c735d6e54790157bdd400a | [
"Apache-2.0"
] | 5 | 2020-06-18T14:16:57.000Z | 2020-11-17T18:49:57.000Z | helloworld_core/__init__.py | JohnOmernik/jupyter_integration_base | b998db6036b739fc8cdf2a0c761485950858194c | [
"Apache-2.0"
] | 1 | 2021-10-23T01:57:08.000Z | 2021-10-23T01:57:08.000Z | helloworld_core/__init__.py | datadistillr/jupyter_integration_base | 76202e63208593d411c735d6e54790157bdd400a | [
"Apache-2.0"
] | 2 | 2021-04-12T19:50:52.000Z | 2021-04-22T09:25:05.000Z |
from addon_core import Addon
from helloworld_core.helloworld_base import Helloworld
from helloworld_core._version import __version__
| 22.5 | 54 | 0.881481 |
from addon_core import Addon
from helloworld_core.helloworld_base import Helloworld
from helloworld_core._version import __version__
| true | true |
f71f436e490f03025f48fefe039e4f6dab564d10 | 3,560 | py | Python | triangular_lattice/diecutting/result_n2.py | ssh0/growing-string | 2e43916e91157dfb4253775149b35ec9d81ef14d | [
"MIT"
] | null | null | null | triangular_lattice/diecutting/result_n2.py | ssh0/growing-string | 2e43916e91157dfb4253775149b35ec9d81ef14d | [
"MIT"
] | 1 | 2016-04-14T08:15:28.000Z | 2016-04-27T02:57:13.000Z | triangular_lattice/diecutting/result_n2.py | ssh0/growing-string | 2e43916e91157dfb4253775149b35ec9d81ef14d | [
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# written by Shotaro Fujimoto
# 2016-12-07
import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d.axes3d import Axes3D
import matplotlib.cm as cm
import numpy as np
from scipy.optimize import curve_fit
from scipy.stats import gamma
import set_data_path
def load_data(_path):
data = np.load(_path)
beta = data['beta']
try:
size_dist_ave = data['size_dist_ave']
return load_data_averaged(_path)
except KeyError:
pass
num_of_strings = data['num_of_strings']
frames = data['frames']
Ls = data['Ls'].astype(np.float)
# Ls = (3 * Ls * (Ls + 1) + 1)
size_dist = data['size_dist']
N0 = np.array([l[1] for l in size_dist], dtype=np.float) / num_of_strings
n0 = N0[1:]
S = np.array([np.sum(l) for l in size_dist], dtype=np.float) / num_of_strings
n1 = (S[1:] - n0) * 2.
N = []
for l in size_dist:
dot = np.dot(np.arange(len(l)), np.array(l).T)
N.append(dot)
# N = np.array([np.dot(np.arange(len(l)), np.array(l).T) for l in size_dist])
N_all = 3. * Ls * (Ls + 1.) + 1
N = np.array(N, dtype=np.float) / num_of_strings
N_minus = N_all - N
N_minus_rate = N_minus / N_all
n_minus = N_minus[1:] - N_minus[:-1]
n1_ave = n1 / np.sum(n1)
n2 = (6 * Ls[1:]) - (n0 + n1 + n_minus)
return {
'beta': beta,
'num_of_strings': num_of_strings,
'frames': frames,
'Ls': Ls,
'N_minus': N_minus,
'N_minus_rate': N_minus_rate,
'S': S,
'n0': n0,
'n1': n1,
'n2': n2,
'n_minus': n_minus,
'n1_ave': n1_ave,
}
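# Reading of the computation above (an interpretation, not stated in the
# original): the cutting edge of a hexagonal region of size L carries 6*L
# sites, so n2 is what remains after the vacant (n_minus), isolated (n0) and
# singly-connected (n1) edge sites are subtracted from that total.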
def load_data_averaged(_path):
data = np.load(_path)
beta = data['beta']
num_of_strings = data['num_of_strings']
frames = data['frames']
Ls = data['Ls'].astype(np.float)
# Ls = (3 * Ls * (Ls + 1) + 1)
# size_dist = data['size_dist']
size_dist_ave = data['size_dist_ave']
N0 = np.array([l[1] for l in size_dist_ave], dtype=np.float)
n0 = N0[1:]
S = np.array([np.sum(l) for l in size_dist_ave], dtype=np.float)
n1 = (S[1:] - n0) * 2.
N = []
for l in size_dist_ave:
dot = np.dot(np.arange(len(l)), np.array(l).T)
N.append(dot)
# N = np.array([np.dot(np.arange(len(l)), np.array(l).T) for l in size_dist_ave])
N_all = 3. * Ls * (Ls + 1.) + 1
N = np.array(N, dtype=np.float)
N_minus = N_all - N
N_minus_rate = N_minus / N_all
n_minus = N_minus[1:] - N_minus[:-1]
n1_ave = n1 / np.sum(n1)
n2 = (6 * Ls[1:]) - (n0 + n1 + n_minus)
return {
'beta': beta,
'num_of_strings': num_of_strings,
'frames': frames,
'Ls': Ls,
'N_minus': N_minus,
'N_minus_rate': N_minus_rate,
'S': S,
'n0': n0,
'n1': n1,
'n2': n2,
'n_minus': n_minus,
'n1_ave': n1_ave,
}
def result_n2(path):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(path):
globals().update(load_data(result_data_path))
ax.plot(Ls[1:], n2, '.', label=r'$\beta = %2.2f$' % beta,
color=cm.viridis(float(i) / len(path)))
ax.legend(loc='best')
ax.set_title('Averaged number of the sites on the cutting edges which \
is connected to two neighbors.' +
' (sample: {})'.format(num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$n_{2}$')
plt.show()
if __name__ == '__main__':
result_n2(set_data_path.data_path)
| 26.969697 | 85 | 0.560393 |
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
from scipy.optimize import curve_fit
from scipy.stats import gamma
import set_data_path
def load_data(_path):
data = np.load(_path)
beta = data['beta']
try:
size_dist_ave = data['size_dist_ave']
return load_data_averaged(_path)
except KeyError:
pass
num_of_strings = data['num_of_strings']
frames = data['frames']
Ls = data['Ls'].astype(np.float)
size_dist = data['size_dist']
N0 = np.array([l[1] for l in size_dist], dtype=np.float) / num_of_strings
n0 = N0[1:]
S = np.array([np.sum(l) for l in size_dist], dtype=np.float) / num_of_strings
n1 = (S[1:] - n0) * 2.
N = []
for l in size_dist:
dot = np.dot(np.arange(len(l)), np.array(l).T)
N.append(dot)
N_all = 3. * Ls * (Ls + 1.) + 1
N = np.array(N, dtype=np.float) / num_of_strings
N_minus = N_all - N
N_minus_rate = N_minus / N_all
n_minus = N_minus[1:] - N_minus[:-1]
n1_ave = n1 / np.sum(n1)
n2 = (6 * Ls[1:]) - (n0 + n1 + n_minus)
return {
'beta': beta,
'num_of_strings': num_of_strings,
'frames': frames,
'Ls': Ls,
'N_minus': N_minus,
'N_minus_rate': N_minus_rate,
'S': S,
'n0': n0,
'n1': n1,
'n2': n2,
'n_minus': n_minus,
'n1_ave': n1_ave,
}
def load_data_averaged(_path):
data = np.load(_path)
beta = data['beta']
num_of_strings = data['num_of_strings']
frames = data['frames']
Ls = data['Ls'].astype(np.float)
size_dist_ave = data['size_dist_ave']
N0 = np.array([l[1] for l in size_dist_ave], dtype=np.float)
n0 = N0[1:]
S = np.array([np.sum(l) for l in size_dist_ave], dtype=np.float)
n1 = (S[1:] - n0) * 2.
N = []
for l in size_dist_ave:
dot = np.dot(np.arange(len(l)), np.array(l).T)
N.append(dot)
N_all = 3. * Ls * (Ls + 1.) + 1
N = np.array(N, dtype=np.float)
N_minus = N_all - N
N_minus_rate = N_minus / N_all
n_minus = N_minus[1:] - N_minus[:-1]
n1_ave = n1 / np.sum(n1)
n2 = (6 * Ls[1:]) - (n0 + n1 + n_minus)
return {
'beta': beta,
'num_of_strings': num_of_strings,
'frames': frames,
'Ls': Ls,
'N_minus': N_minus,
'N_minus_rate': N_minus_rate,
'S': S,
'n0': n0,
'n1': n1,
'n2': n2,
'n_minus': n_minus,
'n1_ave': n1_ave,
}
def result_n2(path):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(path):
globals().update(load_data(result_data_path))
ax.plot(Ls[1:], n2, '.', label=r'$\beta = %2.2f$' % beta,
color=cm.viridis(float(i) / len(path)))
ax.legend(loc='best')
ax.set_title('Averaged number of the sites on the cutting edges which \
is connected to two neighbors.' +
' (sample: {})'.format(num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$n_{2}$')
plt.show()
if __name__ == '__main__':
result_n2(set_data_path.data_path)
| true | true |
f71f444c99957dbabf46b27acb422416484d481f | 2,721 | py | Python | tests/core/type_controller_test.py | fedebruni84/controllerx | 922361b9a590d7483405ec26ed1d553798b7ed7f | [
"MIT"
] | null | null | null | tests/core/type_controller_test.py | fedebruni84/controllerx | 922361b9a590d7483405ec26ed1d553798b7ed7f | [
"MIT"
] | null | null | null | tests/core/type_controller_test.py | fedebruni84/controllerx | 922361b9a590d7483405ec26ed1d553798b7ed7f | [
"MIT"
] | null | null | null |
import pytest
from cx_core.controller import TypeController
class FakeTypeController(TypeController):
def get_domain(self):
return "domain"
@pytest.fixture
def sut(hass_mock):
c = FakeTypeController()
c.args = {}
return c
# All entities from '{entity}' must be from {domain} domain (e.g. {domain}.bedroom)
# '{entity}' must be from {domain} domain (e.g. {domain}.bedroom)
@pytest.mark.parametrize(
"entity, domain, entities, error_expected",
[
("light.kitchen", "light", [], False),
("light1.kitchen", "light", [], True,),
("media_player.kitchen", "light", [], True,),
("media_player.bedroom", "media_player", [], False),
("group.all_lights", "light", ["light.light1", "light.light2"], False),
("group.all_lights", "light", ["light1.light1", "light2.light2"], True),
("group.all", "media_player", ["media_player.test", "light.test"], True),
],
)
@pytest.mark.asyncio
async def test_check_domain(
sut, mocker, monkeypatch, entity, domain, entities, error_expected
):
expected_error_message = ""
if error_expected:
if entities == []:
expected_error_message = (
f"'{entity}' must be from {domain} domain (e.g. {domain}.bedroom)"
)
else:
expected_error_message = f"All entities from '{entity}' must be from {domain} domain (e.g. {domain}.bedroom)"
async def fake_get_state(*args, **kwargs):
return entities
monkeypatch.setattr(sut, "get_state", fake_get_state)
monkeypatch.setattr(sut, "get_domain", lambda *args: domain)
if error_expected:
with pytest.raises(ValueError) as e:
await sut.check_domain(entity)
assert str(e.value) == expected_error_message
else:
await sut.check_domain(entity)
@pytest.mark.parametrize(
"entity_input, expected_calls", [("light.kitchen", 1), ("group.lights", 2)],
)
@pytest.mark.asyncio
async def test_get_entity_state(sut, mocker, monkeypatch, entity_input, expected_calls):
stub_get_state = mocker.stub()
async def fake_get_state(entity, attribute=None):
stub_get_state(entity, attribute=attribute)
return ["entity.test"]
monkeypatch.setattr(sut, "get_state", fake_get_state)
# SUT
await sut.get_entity_state(entity_input, "attribute_test")
# Checks
if expected_calls == 1:
stub_get_state.assert_called_once_with(entity_input, attribute="attribute_test")
elif expected_calls == 2:
        assert stub_get_state.call_count == 2
stub_get_state.assert_any_call(entity_input, attribute="entity_id")
stub_get_state.assert_any_call("entity.test", attribute="attribute_test")
| 32.392857 | 121 | 0.660051 |
import pytest
from cx_core.controller import TypeController
class FakeTypeController(TypeController):
def get_domain(self):
return "domain"
@pytest.fixture
def sut(hass_mock):
c = FakeTypeController()
c.args = {}
return c
@pytest.mark.parametrize(
"entity, domain, entities, error_expected",
[
("light.kitchen", "light", [], False),
("light1.kitchen", "light", [], True,),
("media_player.kitchen", "light", [], True,),
("media_player.bedroom", "media_player", [], False),
("group.all_lights", "light", ["light.light1", "light.light2"], False),
("group.all_lights", "light", ["light1.light1", "light2.light2"], True),
("group.all", "media_player", ["media_player.test", "light.test"], True),
],
)
@pytest.mark.asyncio
async def test_check_domain(
sut, mocker, monkeypatch, entity, domain, entities, error_expected
):
expected_error_message = ""
if error_expected:
if entities == []:
expected_error_message = (
f"'{entity}' must be from {domain} domain (e.g. {domain}.bedroom)"
)
else:
expected_error_message = f"All entities from '{entity}' must be from {domain} domain (e.g. {domain}.bedroom)"
async def fake_get_state(*args, **kwargs):
return entities
monkeypatch.setattr(sut, "get_state", fake_get_state)
monkeypatch.setattr(sut, "get_domain", lambda *args: domain)
if error_expected:
with pytest.raises(ValueError) as e:
await sut.check_domain(entity)
assert str(e.value) == expected_error_message
else:
await sut.check_domain(entity)
@pytest.mark.parametrize(
"entity_input, expected_calls", [("light.kitchen", 1), ("group.lights", 2)],
)
@pytest.mark.asyncio
async def test_get_entity_state(sut, mocker, monkeypatch, entity_input, expected_calls):
stub_get_state = mocker.stub()
async def fake_get_state(entity, attribute=None):
stub_get_state(entity, attribute=attribute)
return ["entity.test"]
monkeypatch.setattr(sut, "get_state", fake_get_state)
await sut.get_entity_state(entity_input, "attribute_test")
if expected_calls == 1:
stub_get_state.assert_called_once_with(entity_input, attribute="attribute_test")
elif expected_calls == 2:
        assert stub_get_state.call_count == 2
stub_get_state.assert_any_call(entity_input, attribute="entity_id")
stub_get_state.assert_any_call("entity.test", attribute="attribute_test")
| true | true |
f71f46e9b66f50d5da46a8294920b9f874abe804 | 7,438 | py | Python | oauth2_provider/oauth2_backends.py | Transparent-CDN/django-oauth-toolkit | 0fb3d5a959ef2108c606e71064986b239540cab5 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2020-02-28T11:09:33.000Z | 2020-02-28T11:09:33.000Z | oauth2_provider/oauth2_backends.py | Transparent-CDN/django-oauth-toolkit | 0fb3d5a959ef2108c606e71064986b239540cab5 | [
"BSD-2-Clause-FreeBSD"
] | 4 | 2019-03-22T17:06:36.000Z | 2019-06-20T02:41:33.000Z | oauth2_provider/oauth2_backends.py | drchrono/django-oauth-toolkit | 846ab0ba8acaa3e4870b424700544aa6329511e4 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2019-10-19T01:03:44.000Z | 2019-10-19T01:03:44.000Z |
import json
from urllib.parse import urlparse, urlunparse
from oauthlib import oauth2
from oauthlib.common import quote, urlencode, urlencoded
from .exceptions import FatalClientError, OAuthToolkitError
from .settings import oauth2_settings
class OAuthLibCore(object):
"""
TODO: add docs
"""
def __init__(self, server=None):
"""
:params server: An instance of oauthlib.oauth2.Server class
"""
self.server = server or oauth2_settings.OAUTH2_SERVER_CLASS(oauth2_settings.OAUTH2_VALIDATOR_CLASS())
def _get_escaped_full_path(self, request):
"""
Django considers "safe" some characters that aren't so for oauthlib.
We have to search for them and properly escape.
"""
parsed = list(urlparse(request.get_full_path()))
unsafe = set(c for c in parsed[4]).difference(urlencoded)
for c in unsafe:
parsed[4] = parsed[4].replace(c, quote(c, safe=b""))
return urlunparse(parsed)
def _get_extra_credentials(self, request):
"""
Produce extra credentials for token response. This dictionary will be
merged with the response.
See also: `oauthlib.oauth2.rfc6749.TokenEndpoint.create_token_response`
:param request: The current django.http.HttpRequest object
:return: dictionary of extra credentials or None (default)
"""
return None
def _extract_params(self, request):
"""
Extract parameters from the Django request object.
Such parameters will then be passed to OAuthLib to build its own
Request object. The body should be encoded using OAuthLib urlencoded.
"""
uri = self._get_escaped_full_path(request)
http_method = request.method
headers = self.extract_headers(request)
body = urlencode(self.extract_body(request))
return uri, http_method, body, headers
def extract_headers(self, request):
"""
Extracts headers from the Django request object
:param request: The current django.http.HttpRequest object
:return: a dictionary with OAuthLib needed headers
"""
headers = request.META.copy()
if "wsgi.input" in headers:
del headers["wsgi.input"]
if "wsgi.errors" in headers:
del headers["wsgi.errors"]
if "HTTP_AUTHORIZATION" in headers:
headers["Authorization"] = headers["HTTP_AUTHORIZATION"]
return headers
def extract_body(self, request):
"""
Extracts the POST body from the Django request object
:param request: The current django.http.HttpRequest object
:return: provided POST parameters
"""
return request.POST.items()
def validate_authorization_request(self, request):
"""
A wrapper method that calls validate_authorization_request on `server_class` instance.
:param request: The current django.http.HttpRequest object
"""
try:
uri, http_method, body, headers = self._extract_params(request)
scopes, credentials = self.server.validate_authorization_request(
uri, http_method=http_method, body=body, headers=headers)
return scopes, credentials
except oauth2.FatalClientError as error:
raise FatalClientError(error=error)
except oauth2.OAuth2Error as error:
raise OAuthToolkitError(error=error)
def create_authorization_response(self, request, scopes, credentials, allow):
"""
A wrapper method that calls create_authorization_response on `server_class`
instance.
:param request: The current django.http.HttpRequest object
:param scopes: A list of provided scopes
:param credentials: Authorization credentials dictionary containing
`client_id`, `state`, `redirect_uri`, `response_type`
:param allow: True if the user authorize the client, otherwise False
"""
try:
if not allow:
raise oauth2.AccessDeniedError(
state=credentials.get("state", None))
# add current user to credentials. this will be used by OAUTH2_VALIDATOR_CLASS
credentials["user"] = request.user
headers, body, status = self.server.create_authorization_response(
uri=credentials["redirect_uri"], scopes=scopes, credentials=credentials)
uri = headers.get("Location", None)
return uri, headers, body, status
except oauth2.FatalClientError as error:
raise FatalClientError(error=error, redirect_uri=credentials["redirect_uri"])
except oauth2.OAuth2Error as error:
raise OAuthToolkitError(error=error, redirect_uri=credentials["redirect_uri"])
def create_token_response(self, request):
"""
A wrapper method that calls create_token_response on `server_class` instance.
:param request: The current django.http.HttpRequest object
"""
uri, http_method, body, headers = self._extract_params(request)
extra_credentials = self._get_extra_credentials(request)
headers, body, status = self.server.create_token_response(uri, http_method, body,
headers, extra_credentials)
uri = headers.get("Location", None)
return uri, headers, body, status
def create_revocation_response(self, request):
"""
A wrapper method that calls create_revocation_response on a
`server_class` instance.
:param request: The current django.http.HttpRequest object
"""
uri, http_method, body, headers = self._extract_params(request)
headers, body, status = self.server.create_revocation_response(
uri, http_method, body, headers)
uri = headers.get("Location", None)
return uri, headers, body, status
def verify_request(self, request, scopes):
"""
A wrapper method that calls verify_request on `server_class` instance.
:param request: The current django.http.HttpRequest object
        :param scopes: A list of scopes the request must satisfy to be verified
"""
uri, http_method, body, headers = self._extract_params(request)
valid, r = self.server.verify_request(uri, http_method, body, headers, scopes=scopes)
return valid, r
class JSONOAuthLibCore(OAuthLibCore):
"""
    Extends the default OAuthLibCore to correctly parse application/json requests
"""
def extract_body(self, request):
"""
Extracts the JSON body from the Django request object
:param request: The current django.http.HttpRequest object
:return: provided POST parameters "urlencodable"
"""
try:
body = json.loads(request.body.decode("utf-8")).items()
except AttributeError:
body = ""
except ValueError:
body = ""
return body
def get_oauthlib_core():
"""
    Utility function that returns an instance of
`oauth2_provider.backends.OAuthLibCore`
"""
validator = oauth2_settings.OAUTH2_VALIDATOR_CLASS()
server = oauth2_settings.OAUTH2_SERVER_CLASS(validator)
return oauth2_settings.OAUTH2_BACKEND_CLASS(server)
| 37.565657 | 109 | 0.655956 | import json
from urllib.parse import urlparse, urlunparse
from oauthlib import oauth2
from oauthlib.common import quote, urlencode, urlencoded
from .exceptions import FatalClientError, OAuthToolkitError
from .settings import oauth2_settings
class OAuthLibCore(object):
def __init__(self, server=None):
self.server = server or oauth2_settings.OAUTH2_SERVER_CLASS(oauth2_settings.OAUTH2_VALIDATOR_CLASS())
def _get_escaped_full_path(self, request):
parsed = list(urlparse(request.get_full_path()))
unsafe = set(c for c in parsed[4]).difference(urlencoded)
for c in unsafe:
parsed[4] = parsed[4].replace(c, quote(c, safe=b""))
return urlunparse(parsed)
def _get_extra_credentials(self, request):
return None
def _extract_params(self, request):
uri = self._get_escaped_full_path(request)
http_method = request.method
headers = self.extract_headers(request)
body = urlencode(self.extract_body(request))
return uri, http_method, body, headers
def extract_headers(self, request):
headers = request.META.copy()
if "wsgi.input" in headers:
del headers["wsgi.input"]
if "wsgi.errors" in headers:
del headers["wsgi.errors"]
if "HTTP_AUTHORIZATION" in headers:
headers["Authorization"] = headers["HTTP_AUTHORIZATION"]
return headers
def extract_body(self, request):
return request.POST.items()
def validate_authorization_request(self, request):
try:
uri, http_method, body, headers = self._extract_params(request)
scopes, credentials = self.server.validate_authorization_request(
uri, http_method=http_method, body=body, headers=headers)
return scopes, credentials
except oauth2.FatalClientError as error:
raise FatalClientError(error=error)
except oauth2.OAuth2Error as error:
raise OAuthToolkitError(error=error)
def create_authorization_response(self, request, scopes, credentials, allow):
try:
if not allow:
raise oauth2.AccessDeniedError(
state=credentials.get("state", None))
credentials["user"] = request.user
headers, body, status = self.server.create_authorization_response(
uri=credentials["redirect_uri"], scopes=scopes, credentials=credentials)
uri = headers.get("Location", None)
return uri, headers, body, status
except oauth2.FatalClientError as error:
raise FatalClientError(error=error, redirect_uri=credentials["redirect_uri"])
except oauth2.OAuth2Error as error:
raise OAuthToolkitError(error=error, redirect_uri=credentials["redirect_uri"])
def create_token_response(self, request):
uri, http_method, body, headers = self._extract_params(request)
extra_credentials = self._get_extra_credentials(request)
headers, body, status = self.server.create_token_response(uri, http_method, body,
headers, extra_credentials)
uri = headers.get("Location", None)
return uri, headers, body, status
def create_revocation_response(self, request):
uri, http_method, body, headers = self._extract_params(request)
headers, body, status = self.server.create_revocation_response(
uri, http_method, body, headers)
uri = headers.get("Location", None)
return uri, headers, body, status
def verify_request(self, request, scopes):
uri, http_method, body, headers = self._extract_params(request)
valid, r = self.server.verify_request(uri, http_method, body, headers, scopes=scopes)
return valid, r
class JSONOAuthLibCore(OAuthLibCore):
def extract_body(self, request):
try:
body = json.loads(request.body.decode("utf-8")).items()
except AttributeError:
body = ""
except ValueError:
body = ""
return body
def get_oauthlib_core():
validator = oauth2_settings.OAUTH2_VALIDATOR_CLASS()
server = oauth2_settings.OAUTH2_SERVER_CLASS(validator)
return oauth2_settings.OAUTH2_BACKEND_CLASS(server)
| true | true |
f71f48f8e33574e8e90e99e4f9578c5f409fad74 | 946 | py | Python | setup.py | kellyjonbrazil/jtbl | 9bfc755bc964fbed59a4884bc4be605a5065f3d8 | [
"MIT"
] | 108 | 2020-03-10T13:22:03.000Z | 2022-03-30T03:09:38.000Z | setup.py | kellyjonbrazil/jtbl | 9bfc755bc964fbed59a4884bc4be605a5065f3d8 | [
"MIT"
] | 9 | 2020-03-08T00:44:38.000Z | 2022-02-15T19:36:04.000Z | setup.py | kellyjonbrazil/jtbl | 9bfc755bc964fbed59a4884bc4be605a5065f3d8 | [
"MIT"
] | 5 | 2020-03-10T11:34:18.000Z | 2021-08-02T10:57:43.000Z | import setuptools
with open('README.md', 'r') as f:
long_description = f.read()
setuptools.setup(
name='jtbl',
version='1.1.7',
author='Kelly Brazil',
author_email='kellyjonbrazil@gmail.com',
description='A simple cli tool to print JSON and JSON Lines data as a table in the terminal.',
install_requires=[
'tabulate>=0.8.6'
],
license='MIT',
long_description=long_description,
long_description_content_type='text/markdown',
python_requires='>=3.6',
url='https://github.com/kellyjonbrazil/jtbl',
packages=setuptools.find_packages(exclude=['*.tests', '*.tests.*', 'tests.*', 'tests']),
entry_points={
'console_scripts': [
'jtbl=jtbl.cli:main'
]
},
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Utilities'
]
)
| 28.666667 | 98 | 0.616279 | import setuptools
with open('README.md', 'r') as f:
long_description = f.read()
setuptools.setup(
name='jtbl',
version='1.1.7',
author='Kelly Brazil',
author_email='kellyjonbrazil@gmail.com',
description='A simple cli tool to print JSON and JSON Lines data as a table in the terminal.',
install_requires=[
'tabulate>=0.8.6'
],
license='MIT',
long_description=long_description,
long_description_content_type='text/markdown',
python_requires='>=3.6',
url='https://github.com/kellyjonbrazil/jtbl',
packages=setuptools.find_packages(exclude=['*.tests', '*.tests.*', 'tests.*', 'tests']),
entry_points={
'console_scripts': [
'jtbl=jtbl.cli:main'
]
},
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Utilities'
]
)
| true | true |
f71f497fb7582513c2d45b7633de0c7c9d7f7303 | 3,186 | py | Python | talk_lib/tests/testtalk.py | allankellynet/mimas | 10025d43bba9e84f502a266760786842e7158a05 | [
"MIT"
] | null | null | null | talk_lib/tests/testtalk.py | allankellynet/mimas | 10025d43bba9e84f502a266760786842e7158a05 | [
"MIT"
] | 1 | 2020-02-05T13:00:29.000Z | 2020-02-05T13:00:29.000Z | talk_lib/tests/testtalk.py | allankellynet/mimas | 10025d43bba9e84f502a266760786842e7158a05 | [
"MIT"
] | null | null | null | #-----------------------------------------------------
# Mimas: conference submission and review system
# (c) Allan Kelly 2016-2020 http://www.allankelly.net
# Licensed under MIT License, see LICENSE file
# -----------------------------------------------------
import unittest
from google.appengine.ext import testbed
from speaker_lib import speaker
from talk_lib import talk
class TestTalk(unittest.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
def tearDown(self):
self.testbed.deactivate()
def test_field_access(self):
t = talk.Talk()
self.assertEquals(t.title, "")
t.title = "Wonderful"
self.assertEquals(t.title, "Wonderful")
self.assertEquals(t.title, "Wonderful".encode('ascii', 'ignore'))
def test_talk_fields(self):
t = talk.Talk()
self.assertEquals(t.title, "")
t.title = "Great talk"
self.assertEquals(t.title, "Great talk")
def test_store_retrieve(self):
spk1 = speaker.make_new_speaker("who@email")
spk1.put()
t1 = talk.Talk(parent=spk1.key)
t1.title = "Wonderful"
t1.put()
t2 = talk.Talk(parent=spk1.key)
t2.title = "Great"
t2.put()
user1_talks = talk.all_user_talks_by_email(spk1.email)
self.assertEquals(len(user1_talks), 2)
spk2 = speaker.make_new_speaker("nobody@email")
spk2.put()
t3 = talk.Talk(parent=spk2.key)
t3.title = "Smashing"
t3.put()
user2_talks = talk.all_user_talks_by_email(spk2.email)
self.assertEquals(len(user2_talks), 1)
t2.key.delete()
user1_talks = talk.all_user_talks_by_email(spk1.email)
self.assertEquals(len(user1_talks), 1)
def test_store_retrieve_by_key(self):
spk1 = speaker.make_new_speaker("who@email")
spk1.put()
t1 = talk.Talk(parent=spk1.key)
t1.title = "Wonderful"
t1.put()
t2 = talk.Talk(parent=spk1.key)
t2.title = "Great"
t2.put()
user1_talks = talk.speaker_talks_by_key(spk1.key)
self.assertEquals(len(user1_talks), 2)
spk2 = speaker.make_new_speaker("nobody@email")
spk2.put()
t3 = talk.Talk(parent=spk2.key)
t3.title = "Smashing"
t3.put()
user2_talks = talk.speaker_talks_by_key(spk2.key)
self.assertEquals(len(user2_talks), 1)
t2.key.delete()
user1_talks = talk.all_user_talks_by_email(spk1.email)
self.assertEquals(len(user1_talks), 1)
def test_no_such_speaker(self):
talks = talk.all_user_talks_by_email("nosuch@nowhere")
self.assertEquals(len(talks), 0)
def test_directory_listing(self):
spk1 = speaker.make_new_speaker("who@email")
spk1.put()
t1_key = talk.mk_talk(spk1.key, "Wonderful")
t1 = t1_key.get()
self.assertTrue(t1.is_listed())
t1.hide_listing()
self.assertFalse(t1.is_listed())
t1.show_listing()
self.assertTrue(t1.is_listed())
| 29.775701 | 73 | 0.605775 |
import unittest
from google.appengine.ext import testbed
from speaker_lib import speaker
from talk_lib import talk
class TestTalk(unittest.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
def tearDown(self):
self.testbed.deactivate()
def test_field_access(self):
t = talk.Talk()
self.assertEquals(t.title, "")
t.title = "Wonderful"
self.assertEquals(t.title, "Wonderful")
self.assertEquals(t.title, "Wonderful".encode('ascii', 'ignore'))
def test_talk_fields(self):
t = talk.Talk()
self.assertEquals(t.title, "")
t.title = "Great talk"
self.assertEquals(t.title, "Great talk")
def test_store_retrieve(self):
spk1 = speaker.make_new_speaker("who@email")
spk1.put()
t1 = talk.Talk(parent=spk1.key)
t1.title = "Wonderful"
t1.put()
t2 = talk.Talk(parent=spk1.key)
t2.title = "Great"
t2.put()
user1_talks = talk.all_user_talks_by_email(spk1.email)
self.assertEquals(len(user1_talks), 2)
spk2 = speaker.make_new_speaker("nobody@email")
spk2.put()
t3 = talk.Talk(parent=spk2.key)
t3.title = "Smashing"
t3.put()
user2_talks = talk.all_user_talks_by_email(spk2.email)
self.assertEquals(len(user2_talks), 1)
t2.key.delete()
user1_talks = talk.all_user_talks_by_email(spk1.email)
self.assertEquals(len(user1_talks), 1)
def test_store_retrieve_by_key(self):
spk1 = speaker.make_new_speaker("who@email")
spk1.put()
t1 = talk.Talk(parent=spk1.key)
t1.title = "Wonderful"
t1.put()
t2 = talk.Talk(parent=spk1.key)
t2.title = "Great"
t2.put()
user1_talks = talk.speaker_talks_by_key(spk1.key)
self.assertEquals(len(user1_talks), 2)
spk2 = speaker.make_new_speaker("nobody@email")
spk2.put()
t3 = talk.Talk(parent=spk2.key)
t3.title = "Smashing"
t3.put()
user2_talks = talk.speaker_talks_by_key(spk2.key)
self.assertEquals(len(user2_talks), 1)
t2.key.delete()
user1_talks = talk.all_user_talks_by_email(spk1.email)
self.assertEquals(len(user1_talks), 1)
def test_no_such_speaker(self):
talks = talk.all_user_talks_by_email("nosuch@nowhere")
self.assertEquals(len(talks), 0)
def test_directory_listing(self):
spk1 = speaker.make_new_speaker("who@email")
spk1.put()
t1_key = talk.mk_talk(spk1.key, "Wonderful")
t1 = t1_key.get()
self.assertTrue(t1.is_listed())
t1.hide_listing()
self.assertFalse(t1.is_listed())
t1.show_listing()
self.assertTrue(t1.is_listed())
| true | true |
f71f4d609651d9bc64373e010c165faa55a5f9cf | 3,278 | py | Python | beyond_tutorial/settings.py | shlior7/beyond-tutorial | 502618b125e9a81d334683b845b248fd750abc77 | [
"MIT"
] | null | null | null | beyond_tutorial/settings.py | shlior7/beyond-tutorial | 502618b125e9a81d334683b845b248fd750abc77 | [
"MIT"
] | null | null | null | beyond_tutorial/settings.py | shlior7/beyond-tutorial | 502618b125e9a81d334683b845b248fd750abc77 | [
"MIT"
] | null | null | null | """
Django settings for beyond_tutorial project.
Generated by 'django-admin startproject' using Django 4.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-s$+txx&pz8eeh$_+wbakb!i+1o%9ijf*=n0e6=k4d^ix_kfv7d'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'msgboard.apps.MsgboardConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'beyond_tutorial.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'beyond_tutorial.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 27.546218 | 91 | 0.705613 |
from pathlib import Path
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = 'django-insecure-s$+txx&pz8eeh$_+wbakb!i+1o%9ijf*=n0e6=k4d^ix_kfv7d'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'msgboard.apps.MsgboardConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'beyond_tutorial.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'beyond_tutorial.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| true | true |
f71f4d8ff46ec6e4a8d5c7681ef34b9994b20203 | 13,826 | py | Python | xltable/expression.py | fkarb/xltable | 7a592642d27ad5ee90d2aa8c26338abaa9d84bea | [
"MIT"
] | 4 | 2017-03-09T20:04:35.000Z | 2020-01-18T16:24:33.000Z | xltable/expression.py | fkarb/xltable | 7a592642d27ad5ee90d2aa8c26338abaa9d84bea | [
"MIT"
] | 6 | 2017-12-05T13:22:10.000Z | 2018-01-29T13:50:27.000Z | xltable/expression.py | fkarb/xltable | 7a592642d27ad5ee90d2aa8c26338abaa9d84bea | [
"MIT"
] | 6 | 2017-10-26T16:44:27.000Z | 2021-08-16T19:39:21.000Z | """
Expressions for building excel formulas without having to use concrete positions.
"""
import operator
import re
class Expression(object):
"""
Base class for all worksheet expressions.
Expressions are used to build formulas referencing ranges in the
worksheet by labels which are resolved to cell references when the
worksheet is written out.
    Expressions may be combined using binary operators.
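    E.g. expressions can be combined directly with Python operators
    (the column and cell labels below are purely illustrative)::
        total = Column("qty") * Column("price") + Cell("fee")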
"""
def __init__(self, value=None):
if value is not None:
self.value = value
def __add__(self, other):
return BinOp(self, _make_expr(other), "+")
def __sub__(self, other):
return BinOp(self, _make_expr(other), "-")
def __mul__(self, other):
return BinOp(self, _make_expr(other), "*")
def __truediv__(self, other):
return BinOp(self, _make_expr(other), "/")
def __lt__(self, other):
return BinOp(self, _make_expr(other), "<")
def __le__(self, other):
return BinOp(self, _make_expr(other), "<=")
def __eq__(self, other):
return BinOp(self, _make_expr(other), "=")
def __ne__(self, other):
return BinOp(self, _make_expr(other), "<>")
def __gt__(self, other):
return BinOp(self, _make_expr(other), ">")
def __ge__(self, other):
return BinOp(self, _make_expr(other), ">=")
def __and__(self, other):
return BinOp(self, _make_expr(other), "&")
def get_formula(self, workbook, row, col):
return "=%s" % self._strip(self.resolve(workbook, row, col))
@property
def value(self):
"""Set a calculated value for this Expression.
Used when writing formulas using XlsxWriter to give cells
an initial value when the sheet is loaded without being calculated.
"""
try:
if isinstance(self.__value, Expression):
return self.__value.value
return self.__value
except AttributeError:
return 0
@property
def has_value(self):
"""return True if value has been set"""
try:
if isinstance(self.__value, Expression):
return self.__value.has_value
return True
except AttributeError:
return False
@value.setter
def value(self, value):
self.__value = value
@staticmethod
def _strip(x):
# strip off the outer parentheses if they match
        return re.sub(r"^\((.*)\)$", r"\1", x)
    def resolve(self, workbook, row, col):
raise NotImplementedError("Expression.resolve")
class Cell(Expression):
"""
Reference to a cell in a table.
:param col: Column label this refers to.
:param row: Row label this refers to, or None to use the current row.
:param row_offset: Offset from the row, used when resolving.
:param table: Name of table the column is in, if not in the same table this expression is in.
Use "%s!%s" % (worksheet.name, table.name) if refering to a table in another worksheet
:param col_fixed: If True when converted to an address the column will be fixed.
:param row_fixed: If True when converted to an address the row will be fixed.
"""
def __init__(self, col, row=None, row_offset=0, table=None, col_fixed=None, row_fixed=None, **kwargs):
super(Cell, self).__init__(**kwargs)
self.__col = col
self.__row = row
self.__row_offset = row_offset
self.__table = table
self.__col_fixed = col_fixed
self.__row_fixed = row_fixed
def resolve(self, workbook, row, col):
table, worksheet = workbook.get_table(self.__table)
top, left = worksheet.get_table_pos(table.name)
col_offset = table.get_column_offset(self.__col)
# if the row has been given use fixed references in the formula unless they've been set explicitly
if self.__row is not None:
row = table.get_row_offset(self.__row)
row_fixed = self.__row_fixed if self.__row_fixed is not None else True
col_fixed = self.__col_fixed if self.__col_fixed is not None else True
else:
# otherwise use un-fixed addresses, unless set explicitly
row_fixed = self.__row_fixed if self.__row_fixed is not None else False
col_fixed = self.__col_fixed if self.__col_fixed is not None else False
return _to_addr(worksheet.name,
top + row + self.__row_offset,
left + col_offset,
row_fixed=row_fixed,
col_fixed=col_fixed)
class Column(Expression):
"""
Reference to a column in a table.
:param col: Column label this refers to.
:param include_header: True if this expression should include the column header.
:param table: Name of table the column is in, if not in the same table this expression is in.
Use "%s!%s" % (worksheet.name, table.name) if refering to a table in another worksheet
:param col_fixed: If True when converted to an address the column will be fixed.
:param row_fixed: If True when converted to an address the row will be fixed.
"""
def __init__(self, col, include_header=False, table=None, col_fixed=True, row_fixed=True, **kwargs):
super(Column, self).__init__(**kwargs)
self.__col = col
self.__include_header = include_header
self.__table = table
self.__col_fixed = col_fixed
self.__row_fixed = row_fixed
def resolve(self, workbook, row, col):
table, worksheet = workbook.get_table(self.__table)
top, left = worksheet.get_table_pos(table.name)
col_offset = table.get_column_offset(self.__col)
row_offset = 0 if self.__include_header else table.header_height
return "'%s'!%s:%s" % (
worksheet.name,
_to_addr(None, top + row_offset, left + col_offset,
row_fixed=self.__row_fixed,
col_fixed=self.__col_fixed),
_to_addr(None, top + table.height - 1, left + col_offset,
row_fixed=self.__row_fixed,
col_fixed=self.__col_fixed))
class Index(Expression):
"""
Reference to a table's index.
:param include_header: True if this expression should include the index header.
:param table: Name of table that owns the index, if not the table this expression is in.
Use "%s!%s" % (worksheet.name, table.name) if refering to a table in another worksheet
:param col_fixed: If True when converted to an address the column will be fixed.
:param row_fixed: If True when converted to an address the row will be fixed.
"""
def __init__(self, include_header=False, table=None, col_fixed=True, row_fixed=True, **kwargs):
super(Index, self).__init__(**kwargs)
self.__include_header = include_header
self.__table = table
self.__col_fixed = col_fixed
self.__row_fixed = row_fixed
def resolve(self, workbook, row, col):
table, worksheet = workbook.get_table(self.__table)
top, left = worksheet.get_table_pos(table.name)
col_offset = table.get_index_offset()
row_offset = 0 if self.__include_header else table.header_height
return "'%s'!%s:%s" % (
worksheet.name,
_to_addr(None, top + row_offset, left + col_offset,
row_fixed=self.__row_fixed,
col_fixed=self.__col_fixed),
_to_addr(None, top + table.height - 1, left + col_offset,
row_fixed=self.__row_fixed,
col_fixed=self.__col_fixed))
class Range(Expression):
"""
Reference to a range in a table.
:param left_col: Left most column label this refers to.
:param right_col: Right most column label this refers to.
:param top_row: Top most row label, or None to select from the top of the table.
:param bottom_row: Bottom most row label, or None to select to the bottom of the table.
:param include_header: Include table header in the range.
:param table: Name of table the column is in, if not in the same table this expression is in.
Use "%s!%s" % (worksheet.name, table.name) if refering to a table in another worksheet
:param col_fixed: If True when converted to an address the column will be fixed.
:param row_fixed: If True when converted to an address the row will be fixed.
"""
def __init__(self,
left_col,
right_col,
top_row=None,
bottom_row=None,
include_header=True,
table=None,
col_fixed=True,
row_fixed=True,
**kwargs):
super(Range, self).__init__(**kwargs)
self.__left_col = left_col
self.__right_col = right_col
self.__top = top_row
self.__bottom = bottom_row
self.__include_header = include_header
self.__table = table
self.__col_fixed = col_fixed
self.__row_fixed = row_fixed
def resolve(self, workbook, row, col):
table, worksheet = workbook.get_table(self.__table)
top, left = worksheet.get_table_pos(table.name)
left_col_offset = table.get_column_offset(self.__left_col)
right_col_offset = table.get_column_offset(self.__right_col)
if self.__top is None:
top_row_offset = 0 if self.__include_header else table.header_height
else:
top_row_offset = table.get_row_offset(self.__top)
if self.__bottom is None:
bottom_row_offset = table.height - 1
else:
bottom_row_offset = table.get_row_offset(self.__bottom)
return "'%s'!%s:%s" % (
worksheet.name,
_to_addr(None, top + top_row_offset, left + left_col_offset,
row_fixed=self.__row_fixed,
col_fixed=self.__col_fixed),
_to_addr(None, top + bottom_row_offset, left + right_col_offset,
row_fixed=self.__row_fixed,
col_fixed=self.__col_fixed))
class Formula(Expression):
"""
Formula expression.
E.g. to create a formula like "=SUMPRODUCT(a, b)" where a and b
are columns in a table you would do::
formula = Formula("SUMPRODUCT", Column("col_a"), Column("col_b"))
:param name: Name of Excel function, eg "SUMPRODUCT".
:param args: Expressions to use as arguments to the function.
"""
def __init__(self, name, *args, **kwargs):
super(Formula, self).__init__(**kwargs)
self.__name = name
self.__args = args
def resolve(self, workbook, row, col):
def to_arg(x):
if x is None:
return ""
return self._strip(_make_expr(x).resolve(workbook, row, col))
args = [to_arg(x) for x in self.__args]
return "%s(%s)" % (self.__name, ",".join(args))
class ArrayExpression(Expression):
"""
Wraps an expression in an array formula (ie. surrounds it with {})
:param xltable.Expression expr: Expression to be wrapped
"""
def __init__(self, expr):
Expression.__init__(self, expr)
self.__expr = expr
def resolve(self, workbook, row, col):
return self.__expr.resolve(workbook, row, col)
def get_formula(self, workbook, row, col):
return "{%s}" % self.__expr.get_formula(workbook, row, col).strip("{}")
class BinOp(Expression):
"""
Internal use - composite expression combining two expression with a binary operator.
"""
__operators = {
"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"/": operator.truediv,
">": operator.gt,
"<": operator.lt,
"<=": operator.le,
">=": operator.ge,
"!=": operator.ne,
"=": operator.eq,
"&": operator.and_,
"|": operator.or_,
}
def __init__(self, lhs, rhs, op, **kwargs):
super(BinOp, self).__init__(**kwargs)
self.__lhs = lhs
self.__rhs = rhs
self.__op = op
if lhs.has_value and rhs.has_value:
self.value = self.__operators[op](lhs.value, rhs.value)
def resolve(self, workbook, row, col):
return "(%s%s%s)" % (
self.__lhs.resolve(workbook, row, col),
self.__op,
self.__rhs.resolve(workbook, row, col))
class ConstExpr(Expression):
"""
Internal use - expression for wrapping constants.
"""
def __init__(self, value, **kwargs):
super(ConstExpr, self).__init__(**kwargs)
self.value = value
self.__value = value
def resolve(self, workbook, row, col):
if isinstance(self.__value, str):
return '"%s"' % self.__value
if isinstance(self.__value, bool):
return "TRUE" if self.__value else "FALSE"
return str(self.__value)
def _to_addr(worksheet, row, col, row_fixed=False, col_fixed=False):
"""converts a (0,0) based coordinate to an excel address"""
addr = ""
A = ord('A')
col += 1
while col > 0:
addr = chr(A + ((col - 1) % 26)) + addr
col = (col - 1) // 26
prefix = ("'%s'!" % worksheet) if worksheet else ""
col_modifier = "$" if col_fixed else ""
row_modifier = "$" if row_fixed else ""
return prefix + "%s%s%s%d" % (col_modifier, addr, row_modifier, row+1)
def _make_expr(x):
if isinstance(x, Expression):
return x
return ConstExpr(x)
| 36.67374 | 106 | 0.607913 | import operator
import re
class Expression(object):
def __init__(self, value=None):
if value is not None:
self.value = value
def __add__(self, other):
return BinOp(self, _make_expr(other), "+")
def __sub__(self, other):
return BinOp(self, _make_expr(other), "-")
def __mul__(self, other):
return BinOp(self, _make_expr(other), "*")
def __truediv__(self, other):
return BinOp(self, _make_expr(other), "/")
def __lt__(self, other):
return BinOp(self, _make_expr(other), "<")
def __le__(self, other):
return BinOp(self, _make_expr(other), "<=")
def __eq__(self, other):
return BinOp(self, _make_expr(other), "=")
def __ne__(self, other):
return BinOp(self, _make_expr(other), "<>")
def __gt__(self, other):
return BinOp(self, _make_expr(other), ">")
def __ge__(self, other):
return BinOp(self, _make_expr(other), ">=")
def __and__(self, other):
return BinOp(self, _make_expr(other), "&")
def get_formula(self, workbook, row, col):
return "=%s" % self._strip(self.resolve(workbook, row, col))
@property
def value(self):
try:
if isinstance(self.__value, Expression):
return self.__value.value
return self.__value
except AttributeError:
return 0
@property
def has_value(self):
try:
if isinstance(self.__value, Expression):
return self.__value.has_value
return True
except AttributeError:
return False
@value.setter
def value(self, value):
self.__value = value
@staticmethod
def _strip(x):
        return re.sub(r"^\((.*)\)$", r"\1", x)
    def resolve(self, workbook, row, col):
raise NotImplementedError("Expression.resolve")
class Cell(Expression):
def __init__(self, col, row=None, row_offset=0, table=None, col_fixed=None, row_fixed=None, **kwargs):
super(Cell, self).__init__(**kwargs)
self.__col = col
self.__row = row
self.__row_offset = row_offset
self.__table = table
self.__col_fixed = col_fixed
self.__row_fixed = row_fixed
def resolve(self, workbook, row, col):
table, worksheet = workbook.get_table(self.__table)
top, left = worksheet.get_table_pos(table.name)
col_offset = table.get_column_offset(self.__col)
if self.__row is not None:
row = table.get_row_offset(self.__row)
row_fixed = self.__row_fixed if self.__row_fixed is not None else True
col_fixed = self.__col_fixed if self.__col_fixed is not None else True
else:
# otherwise use un-fixed addresses, unless set explicitly
row_fixed = self.__row_fixed if self.__row_fixed is not None else False
col_fixed = self.__col_fixed if self.__col_fixed is not None else False
return _to_addr(worksheet.name,
top + row + self.__row_offset,
left + col_offset,
row_fixed=row_fixed,
col_fixed=col_fixed)
class Column(Expression):
def __init__(self, col, include_header=False, table=None, col_fixed=True, row_fixed=True, **kwargs):
super(Column, self).__init__(**kwargs)
self.__col = col
self.__include_header = include_header
self.__table = table
self.__col_fixed = col_fixed
self.__row_fixed = row_fixed
def resolve(self, workbook, row, col):
table, worksheet = workbook.get_table(self.__table)
top, left = worksheet.get_table_pos(table.name)
col_offset = table.get_column_offset(self.__col)
row_offset = 0 if self.__include_header else table.header_height
return "'%s'!%s:%s" % (
worksheet.name,
_to_addr(None, top + row_offset, left + col_offset,
row_fixed=self.__row_fixed,
col_fixed=self.__col_fixed),
_to_addr(None, top + table.height - 1, left + col_offset,
row_fixed=self.__row_fixed,
col_fixed=self.__col_fixed))
class Index(Expression):
def __init__(self, include_header=False, table=None, col_fixed=True, row_fixed=True, **kwargs):
super(Index, self).__init__(**kwargs)
self.__include_header = include_header
self.__table = table
self.__col_fixed = col_fixed
self.__row_fixed = row_fixed
def resolve(self, workbook, row, col):
table, worksheet = workbook.get_table(self.__table)
top, left = worksheet.get_table_pos(table.name)
col_offset = table.get_index_offset()
row_offset = 0 if self.__include_header else table.header_height
return "'%s'!%s:%s" % (
worksheet.name,
_to_addr(None, top + row_offset, left + col_offset,
row_fixed=self.__row_fixed,
col_fixed=self.__col_fixed),
_to_addr(None, top + table.height - 1, left + col_offset,
row_fixed=self.__row_fixed,
col_fixed=self.__col_fixed))
class Range(Expression):
def __init__(self,
left_col,
right_col,
top_row=None,
bottom_row=None,
include_header=True,
table=None,
col_fixed=True,
row_fixed=True,
**kwargs):
super(Range, self).__init__(**kwargs)
self.__left_col = left_col
self.__right_col = right_col
self.__top = top_row
self.__bottom = bottom_row
self.__include_header = include_header
self.__table = table
self.__col_fixed = col_fixed
self.__row_fixed = row_fixed
def resolve(self, workbook, row, col):
table, worksheet = workbook.get_table(self.__table)
top, left = worksheet.get_table_pos(table.name)
left_col_offset = table.get_column_offset(self.__left_col)
right_col_offset = table.get_column_offset(self.__right_col)
if self.__top is None:
top_row_offset = 0 if self.__include_header else table.header_height
else:
top_row_offset = table.get_row_offset(self.__top)
if self.__bottom is None:
bottom_row_offset = table.height - 1
else:
bottom_row_offset = table.get_row_offset(self.__bottom)
return "'%s'!%s:%s" % (
worksheet.name,
_to_addr(None, top + top_row_offset, left + left_col_offset,
row_fixed=self.__row_fixed,
col_fixed=self.__col_fixed),
_to_addr(None, top + bottom_row_offset, left + right_col_offset,
row_fixed=self.__row_fixed,
col_fixed=self.__col_fixed))
class Formula(Expression):
def __init__(self, name, *args, **kwargs):
super(Formula, self).__init__(**kwargs)
self.__name = name
self.__args = args
def resolve(self, workbook, row, col):
def to_arg(x):
if x is None:
return ""
return self._strip(_make_expr(x).resolve(workbook, row, col))
args = [to_arg(x) for x in self.__args]
return "%s(%s)" % (self.__name, ",".join(args))
class ArrayExpression(Expression):
def __init__(self, expr):
Expression.__init__(self, expr)
self.__expr = expr
def resolve(self, workbook, row, col):
return self.__expr.resolve(workbook, row, col)
def get_formula(self, workbook, row, col):
return "{%s}" % self.__expr.get_formula(workbook, row, col).strip("{}")
class BinOp(Expression):
__operators = {
"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"/": operator.truediv,
">": operator.gt,
"<": operator.lt,
"<=": operator.le,
">=": operator.ge,
"!=": operator.ne,
"=": operator.eq,
"&": operator.and_,
"|": operator.or_,
}
def __init__(self, lhs, rhs, op, **kwargs):
super(BinOp, self).__init__(**kwargs)
self.__lhs = lhs
self.__rhs = rhs
self.__op = op
if lhs.has_value and rhs.has_value:
self.value = self.__operators[op](lhs.value, rhs.value)
def resolve(self, workbook, row, col):
return "(%s%s%s)" % (
self.__lhs.resolve(workbook, row, col),
self.__op,
self.__rhs.resolve(workbook, row, col))
class ConstExpr(Expression):
def __init__(self, value, **kwargs):
super(ConstExpr, self).__init__(**kwargs)
self.value = value
self.__value = value
def resolve(self, workbook, row, col):
if isinstance(self.__value, str):
return '"%s"' % self.__value
if isinstance(self.__value, bool):
return "TRUE" if self.__value else "FALSE"
return str(self.__value)
def _to_addr(worksheet, row, col, row_fixed=False, col_fixed=False):
addr = ""
A = ord('A')
col += 1
while col > 0:
addr = chr(A + ((col - 1) % 26)) + addr
col = (col - 1) // 26
prefix = ("'%s'!" % worksheet) if worksheet else ""
col_modifier = "$" if col_fixed else ""
row_modifier = "$" if row_fixed else ""
return prefix + "%s%s%s%d" % (col_modifier, addr, row_modifier, row+1)
def _make_expr(x):
if isinstance(x, Expression):
return x
return ConstExpr(x)
| true | true |
f71f4dd1d3b032910ffb279d50397befdfd25e03 | 4,066 | py | Python | benchmark/startQiskit_noisy2042.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_noisy2042.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_noisy2042.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=4
# total number=36
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
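# Note: the XOR result is returned in reversed order,
# e.g. bitwise_xor("101", "011") -> "011".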
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.cx(input_qubit[0],input_qubit[3]) # number=13
prog.cx(input_qubit[0],input_qubit[3]) # number=17
prog.x(input_qubit[3]) # number=18
prog.cx(input_qubit[0],input_qubit[3]) # number=19
prog.cx(input_qubit[0],input_qubit[3]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=12
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.cx(input_qubit[0],input_qubit[3]) # number=27
prog.x(input_qubit[3]) # number=28
prog.h(input_qubit[3]) # number=30
prog.cz(input_qubit[0],input_qubit[3]) # number=31
prog.h(input_qubit[3]) # number=32
prog.cx(input_qubit[3],input_qubit[0]) # number=20
prog.h(input_qubit[0]) # number=33
prog.cz(input_qubit[3],input_qubit[0]) # number=34
prog.h(input_qubit[0]) # number=35
prog.z(input_qubit[3]) # number=24
prog.cx(input_qubit[3],input_qubit[0]) # number=25
prog.cx(input_qubit[3],input_qubit[0]) # number=22
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.y(input_qubit[2]) # number=11
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = FakeVigo()
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy2042.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 34.457627 | 140 | 0.65396 |
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.cx(input_qubit[0],input_qubit[3])
prog.cx(input_qubit[0],input_qubit[3])
prog.x(input_qubit[3])
prog.cx(input_qubit[0],input_qubit[3])
prog.cx(input_qubit[0],input_qubit[3])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.h(input_qubit[3])
prog.y(input_qubit[3])
prog.h(input_qubit[0])
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1])
prog.h(input_qubit[2])
prog.cx(input_qubit[0],input_qubit[3])
prog.x(input_qubit[3])
prog.h(input_qubit[3])
prog.cz(input_qubit[0],input_qubit[3])
prog.h(input_qubit[3])
prog.cx(input_qubit[3],input_qubit[0])
prog.h(input_qubit[0])
prog.cz(input_qubit[3],input_qubit[0])
prog.h(input_qubit[0])
prog.z(input_qubit[3])
prog.cx(input_qubit[3],input_qubit[0])
prog.cx(input_qubit[3],input_qubit[0])
prog.h(input_qubit[3])
prog.h(input_qubit[0])
prog.y(input_qubit[2])
prog.y(input_qubit[2])
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = FakeVigo()
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy2042.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| true | true |
f71f513fecec1a24f3bc3562ef0e4939b4598d59 | 604 | py | Python | discord_styler/__init__.py | miaowware/discord-styled-text | 9e02375b0ba947628bf7a7c853efc433f74d9373 | [
"BSD-3-Clause"
] | 1 | 2022-01-23T23:26:53.000Z | 2022-01-23T23:26:53.000Z | discord_styler/__init__.py | miaowware/discord-styled-text | 9e02375b0ba947628bf7a7c853efc433f74d9373 | [
"BSD-3-Clause"
] | 3 | 2021-08-28T01:46:36.000Z | 2021-09-07T02:59:03.000Z | discord_styler/__init__.py | miaowware/discord-styled-text | 9e02375b0ba947628bf7a7c853efc433f74d9373 | [
"BSD-3-Clause"
] | null | null | null | """
discord-styled-text
---
A small library to style text for Discord without having to remember any syntax
Copyright 2021 classabbyamp, 0x5c
Released under the terms of the BSD 3-Clause license.
"""
from .__info__ import __version__
from .styler import StyledText, Italic, Bold, Underline, Strikethrough, InlineCode, Spoiler, BlockQuote
from .styler import CodeBlock
from .styler import TitledURL, NonEmbeddingURL
from .styler import MentionABC, UserMention, RoleMention, ChannelMention
from .styler import TimeStyle, TimeStamp
from .escape import escape_markdown, escape_mentions, escape_everything
| 33.555556 | 103 | 0.816225 |
from .__info__ import __version__
from .styler import StyledText, Italic, Bold, Underline, Strikethrough, InlineCode, Spoiler, BlockQuote
from .styler import CodeBlock
from .styler import TitledURL, NonEmbeddingURL
from .styler import MentionABC, UserMention, RoleMention, ChannelMention
from .styler import TimeStyle, TimeStamp
from .escape import escape_markdown, escape_mentions, escape_everything
| true | true |
f71f51b989d608434d95424eaab6a007063a211a | 27,424 | py | Python | Multiagent/pacman.py | zengziyunthomas/Artifical-Intelligence | 4862a65bc8743e89b3c92d94eeca973f8b1851f3 | [
"MIT"
] | 1 | 2022-01-07T08:03:55.000Z | 2022-01-07T08:03:55.000Z | Multiagent/pacman.py | zengziyunthomas/Artifical-Intelligence | 4862a65bc8743e89b3c92d94eeca973f8b1851f3 | [
"MIT"
] | null | null | null | Multiagent/pacman.py | zengziyunthomas/Artifical-Intelligence | 4862a65bc8743e89b3c92d94eeca973f8b1851f3 | [
"MIT"
] | null | null | null | # pacman.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
Pacman.py holds the logic for the classic pacman game along with the main
code to run a game. This file is divided into three sections:
(i) Your interface to the pacman world:
Pacman is a complex environment. You probably don't want to
          read through all of the code we wrote to make the game run
correctly. This section contains the parts of the code
that you will need to understand in order to complete the
project. There is also some code in game.py that you should
understand.
(ii) The hidden secrets of pacman:
This section contains all of the logic code that the pacman
environment uses to decide who can move where, who dies when
things collide, etc. You shouldn't need to read this section
of code, but you can if you want.
(iii) Framework to start a game:
The final section contains the code for reading the command
you use to set up the game, then starting up a new game, along with
linking in all the external parts (agent functions, graphics).
Check this section out to see all the options available to you.
To play your first game, type 'python pacman.py' from the command line.
The keys are 'a', 's', 'd', and 'w' to move (or arrow keys). Have fun!
"""
from game import GameStateData
from game import Game
from game import Directions
from game import Actions
from util import nearestPoint
from util import manhattanDistance
import util
import layout
import sys
import types
import time
import random
import os
###################################################
# YOUR INTERFACE TO THE PACMAN WORLD: A GameState #
###################################################
class GameState:
"""
A GameState specifies the full game state, including the food, capsules,
agent configurations and score changes.
GameStates are used by the Game object to capture the actual state of the game and
can be used by agents to reason about the game.
Much of the information in a GameState is stored in a GameStateData object. We
strongly suggest that you access that data via the accessor methods below rather
than referring to the GameStateData object directly.
Note that in classic Pacman, Pacman is always agent 0.
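    For example, an agent might inspect and expand a state like this
    (illustrative only; see the accessor methods below):
    legal = state.getLegalPacmanActions()
    child = state.getPacmanNextState(legal[0])
    score = child.getScore()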
"""
####################################################
# Accessor methods: use these to access state data #
####################################################
# static variable keeps track of which states have had getLegalActions called
explored = set()
def getAndResetExplored():
tmp = GameState.explored.copy()
GameState.explored = set()
return tmp
getAndResetExplored = staticmethod(getAndResetExplored)
def getLegalActions(self, agentIndex=0):
"""
Returns the legal actions for the agent specified.
"""
# GameState.explored.add(self)
if self.isWin() or self.isLose():
return []
if agentIndex == 0: # Pacman is moving
return PacmanRules.getLegalActions(self)
else:
return GhostRules.getLegalActions(self, agentIndex)
def getNextState(self, agentIndex, action):
"""
Returns the child state after the specified agent takes the action.
"""
# Check that children exist
if self.isWin() or self.isLose():
raise Exception('Can\'t generate a child of a terminal state.')
# Copy current state
state = GameState(self)
# Let agent's logic deal with its action's effects on the board
if agentIndex == 0: # Pacman is moving
state.data._eaten = [False for i in range(state.getNumAgents())]
PacmanRules.applyAction(state, action)
else: # A ghost is moving
GhostRules.applyAction(state, action, agentIndex)
# Time passes
if agentIndex == 0:
state.data.scoreChange += -TIME_PENALTY # Penalty for waiting around
else:
GhostRules.decrementTimer(state.data.agentStates[agentIndex])
# Resolve multi-agent effects
GhostRules.checkDeath(state, agentIndex)
# Book keeping
state.data._agentMoved = agentIndex
state.data.score += state.data.scoreChange
GameState.explored.add(self)
GameState.explored.add(state)
return state
def getLegalPacmanActions(self):
return self.getLegalActions(0)
def getPacmanNextState(self, action):
"""
Generates the child state after the specified pacman move
"""
return self.getNextState(0, action)
def getPacmanState(self):
"""
Returns an AgentState object for pacman (in game.py)
state.pos gives the current position
state.direction gives the travel vector
"""
return self.data.agentStates[0].copy()
def getPacmanPosition(self):
return self.data.agentStates[0].getPosition()
def getGhostStates(self):
return self.data.agentStates[1:]
def getGhostState(self, agentIndex):
if agentIndex == 0 or agentIndex >= self.getNumAgents():
raise Exception("Invalid index passed to getGhostState")
return self.data.agentStates[agentIndex]
def getGhostPosition(self, agentIndex):
if agentIndex == 0:
raise Exception("Pacman's index passed to getGhostPosition")
return self.data.agentStates[agentIndex].getPosition()
def getGhostPositions(self):
return [s.getPosition() for s in self.getGhostStates()]
def getNumAgents(self):
return len(self.data.agentStates)
def getScore(self):
return float(self.data.score)
def getCapsules(self):
"""
Returns a list of positions (x,y) of the remaining capsules.
"""
return self.data.capsules
def getNumFood(self):
return self.data.food.count()
def getFood(self):
"""
Returns a Grid of boolean food indicator variables.
Grids can be accessed via list notation, so to check
if there is food at (x,y), just call
currentFood = state.getFood()
if currentFood[x][y] == True: ...
"""
return self.data.food
def getWalls(self):
"""
Returns a Grid of boolean wall indicator variables.
Grids can be accessed via list notation, so to check
if there is a wall at (x,y), just call
walls = state.getWalls()
if walls[x][y] == True: ...
"""
return self.data.layout.walls
def hasFood(self, x, y):
return self.data.food[x][y]
def hasWall(self, x, y):
return self.data.layout.walls[x][y]
def isLose(self):
return self.data._lose
def isWin(self):
return self.data._win
#############################################
# Helper methods: #
# You shouldn't need to call these directly #
#############################################
def __init__(self, prevState=None):
"""
Generates a new state by copying information from its predecessor.
"""
        if prevState != None: # Copy from a predecessor state
self.data = GameStateData(prevState.data)
else:
self.data = GameStateData()
def deepCopy(self):
state = GameState(self)
state.data = self.data.deepCopy()
return state
def __eq__(self, other):
"""
Allows two states to be compared.
"""
return hasattr(other, 'data') and self.data == other.data
def __hash__(self):
"""
Allows states to be keys of dictionaries.
"""
return hash(self.data)
def __str__(self):
return str(self.data)
def initialize(self, layout, numGhostAgents=1000):
"""
Creates an initial game state from a layout array (see layout.py).
"""
self.data.initialize(layout, numGhostAgents)
############################################################################
# THE HIDDEN SECRETS OF PACMAN #
# #
# You shouldn't need to look through the code in this section of the file. #
############################################################################
SCARED_TIME = 40 # Moves ghosts are scared
COLLISION_TOLERANCE = 0.7 # How close ghosts must be to Pacman to kill
TIME_PENALTY = 1 # Number of points lost each round
class ClassicGameRules:
"""
These game rules manage the control flow of a game, deciding when
and how the game starts and ends.
"""
def __init__(self, timeout=30):
self.timeout = timeout
def newGame(self, layout, pacmanAgent, ghostAgents, display, quiet=False, catchExceptions=False):
agents = [pacmanAgent] + ghostAgents[:layout.getNumGhosts()]
initState = GameState()
initState.initialize(layout, len(ghostAgents))
game = Game(agents, display, self, catchExceptions=catchExceptions)
game.state = initState
self.initialState = initState.deepCopy()
self.quiet = quiet
return game
def process(self, state, game):
"""
Checks to see whether it is time to end the game.
"""
if state.isWin():
self.win(state, game)
if state.isLose():
self.lose(state, game)
def win(self, state, game):
if not self.quiet:
print("Pacman emerges victorious! Score: %d" % state.data.score)
game.gameOver = True
def lose(self, state, game):
if not self.quiet:
print("Pacman died! Score: %d" % state.data.score)
game.gameOver = True
def getProgress(self, game):
return float(game.state.getNumFood()) / self.initialState.getNumFood()
def agentCrash(self, game, agentIndex):
if agentIndex == 0:
print("Pacman crashed")
else:
print("A ghost crashed")
def getMaxTotalTime(self, agentIndex):
return self.timeout
def getMaxStartupTime(self, agentIndex):
return self.timeout
def getMoveWarningTime(self, agentIndex):
return self.timeout
def getMoveTimeout(self, agentIndex):
return self.timeout
def getMaxTimeWarnings(self, agentIndex):
return 0
class PacmanRules:
"""
These functions govern how pacman interacts with his environment under
the classic game rules.
"""
PACMAN_SPEED = 1
def getLegalActions(state):
"""
Returns a list of possible actions.
"""
return Actions.getPossibleActions(state.getPacmanState().configuration, state.data.layout.walls)
getLegalActions = staticmethod(getLegalActions)
def applyAction(state, action):
"""
Edits the state to reflect the results of the action.
"""
legal = PacmanRules.getLegalActions(state)
if action not in legal:
raise Exception("Illegal action " + str(action))
pacmanState = state.data.agentStates[0]
# Update Configuration
vector = Actions.directionToVector(action, PacmanRules.PACMAN_SPEED)
pacmanState.configuration = pacmanState.configuration.getNextState(
vector)
# Eat
next = pacmanState.configuration.getPosition()
nearest = nearestPoint(next)
if manhattanDistance(nearest, next) <= 0.5:
# Remove food
PacmanRules.consume(nearest, state)
applyAction = staticmethod(applyAction)
def consume(position, state):
x, y = position
# Eat food
if state.data.food[x][y]:
state.data.scoreChange += 10
state.data.food = state.data.food.copy()
state.data.food[x][y] = False
state.data._foodEaten = position
# TODO: cache numFood?
numFood = state.getNumFood()
if numFood == 0 and not state.data._lose:
state.data.scoreChange += 500
state.data._win = True
# Eat capsule
if(position in state.getCapsules()):
state.data.capsules.remove(position)
state.data._capsuleEaten = position
# Reset all ghosts' scared timers
for index in range(1, len(state.data.agentStates)):
state.data.agentStates[index].scaredTimer = SCARED_TIME
consume = staticmethod(consume)
class GhostRules:
"""
These functions dictate how ghosts interact with their environment.
"""
GHOST_SPEED = 1.0
def getLegalActions(state, ghostIndex):
"""
Ghosts cannot stop, and cannot turn around unless they
reach a dead end, but can turn 90 degrees at intersections.
"""
conf = state.getGhostState(ghostIndex).configuration
possibleActions = Actions.getPossibleActions(
conf, state.data.layout.walls)
reverse = Actions.reverseDirection(conf.direction)
if Directions.STOP in possibleActions:
possibleActions.remove(Directions.STOP)
if reverse in possibleActions and len(possibleActions) > 1:
possibleActions.remove(reverse)
return possibleActions
getLegalActions = staticmethod(getLegalActions)
def applyAction(state, action, ghostIndex):
legal = GhostRules.getLegalActions(state, ghostIndex)
if action not in legal:
raise Exception("Illegal ghost action " + str(action))
ghostState = state.data.agentStates[ghostIndex]
speed = GhostRules.GHOST_SPEED
if ghostState.scaredTimer > 0:
speed /= 2.0
vector = Actions.directionToVector(action, speed)
ghostState.configuration = ghostState.configuration.getNextState(
vector)
applyAction = staticmethod(applyAction)
def decrementTimer(ghostState):
timer = ghostState.scaredTimer
if timer == 1:
ghostState.configuration.pos = nearestPoint(
ghostState.configuration.pos)
ghostState.scaredTimer = max(0, timer - 1)
decrementTimer = staticmethod(decrementTimer)
def checkDeath(state, agentIndex):
pacmanPosition = state.getPacmanPosition()
if agentIndex == 0: # Pacman just moved; Anyone can kill him
for index in range(1, len(state.data.agentStates)):
ghostState = state.data.agentStates[index]
ghostPosition = ghostState.configuration.getPosition()
if GhostRules.canKill(pacmanPosition, ghostPosition):
GhostRules.collide(state, ghostState, index)
else:
ghostState = state.data.agentStates[agentIndex]
ghostPosition = ghostState.configuration.getPosition()
if GhostRules.canKill(pacmanPosition, ghostPosition):
GhostRules.collide(state, ghostState, agentIndex)
checkDeath = staticmethod(checkDeath)
def collide(state, ghostState, agentIndex):
if ghostState.scaredTimer > 0:
state.data.scoreChange += 200
GhostRules.placeGhost(state, ghostState)
ghostState.scaredTimer = 0
# Added for first-person
state.data._eaten[agentIndex] = True
else:
if not state.data._win:
state.data.scoreChange -= 500
state.data._lose = True
collide = staticmethod(collide)
def canKill(pacmanPosition, ghostPosition):
return manhattanDistance(ghostPosition, pacmanPosition) <= COLLISION_TOLERANCE
canKill = staticmethod(canKill)
def placeGhost(state, ghostState):
ghostState.configuration = ghostState.start
placeGhost = staticmethod(placeGhost)
#############################
# FRAMEWORK TO START A GAME #
#############################
def default(str):
return str + ' [Default: %default]'
def parseAgentArgs(str):
if str == None:
return {}
pieces = str.split(',')
opts = {}
for p in pieces:
if '=' in p:
key, val = p.split('=')
else:
key, val = p, 1
opts[key] = val
return opts
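# For reference, this mirrors the -a/--agentArgs format defined below, e.g.
# parseAgentArgs("opt1=val1,opt2,opt3=val3") -> {'opt1': 'val1', 'opt2': 1, 'opt3': 'val3'}
# (values given with '=' stay strings; bare flags default to the integer 1).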
def readCommand(argv):
"""
Processes the command used to run pacman from the command line.
"""
from optparse import OptionParser
usageStr = """
USAGE: python pacman.py <options>
EXAMPLES: (1) python pacman.py
- starts an interactive game
(2) python pacman.py --layout smallClassic --zoom 2
OR python pacman.py -l smallClassic -z 2
- starts an interactive game on a smaller board, zoomed in
"""
parser = OptionParser(usageStr)
parser.add_option('-n', '--numGames', dest='numGames', type='int',
help=default('the number of GAMES to play'), metavar='GAMES', default=1)
parser.add_option('-l', '--layout', dest='layout',
help=default(
'the LAYOUT_FILE from which to load the map layout'),
metavar='LAYOUT_FILE', default='mediumClassic')
parser.add_option('-p', '--pacman', dest='pacman',
help=default(
'the agent TYPE in the pacmanAgents module to use'),
metavar='TYPE', default='KeyboardAgent')
parser.add_option('-t', '--textGraphics', action='store_true', dest='textGraphics',
help='Display output as text only', default=False)
parser.add_option('-q', '--quietTextGraphics', action='store_true', dest='quietGraphics',
help='Generate minimal output and no graphics', default=False)
parser.add_option('-g', '--ghosts', dest='ghost',
help=default(
'the ghost agent TYPE in the ghostAgents module to use'),
metavar='TYPE', default='RandomGhost')
parser.add_option('-k', '--numghosts', type='int', dest='numGhosts',
help=default('The maximum number of ghosts to use'), default=4)
parser.add_option('-z', '--zoom', type='float', dest='zoom',
help=default('Zoom the size of the graphics window'), default=1.0)
parser.add_option('-f', '--fixRandomSeed', action='store_true', dest='fixRandomSeed',
help='Fixes the random seed to always play the same game', default=False)
parser.add_option('-r', '--recordActions', action='store_true', dest='record',
help='Writes game histories to a file (named by the time they were played)', default=False)
parser.add_option('--replay', dest='gameToReplay',
help='A recorded game file (pickle) to replay', default=None)
parser.add_option('-a', '--agentArgs', dest='agentArgs',
help='Comma separated values sent to agent. e.g. "opt1=val1,opt2,opt3=val3"')
parser.add_option('-x', '--numTraining', dest='numTraining', type='int',
help=default('How many episodes are training (suppresses output)'), default=0)
parser.add_option('--frameTime', dest='frameTime', type='float',
help=default('Time to delay between frames; <0 means keyboard'), default=0.1)
parser.add_option('-c', '--catchExceptions', action='store_true', dest='catchExceptions',
help='Turns on exception handling and timeouts during games', default=False)
parser.add_option('--timeout', dest='timeout', type='int',
help=default('Maximum length of time an agent can spend computing in a single game'), default=30)
options, otherjunk = parser.parse_args(argv)
if len(otherjunk) != 0:
raise Exception('Command line input not understood: ' + str(otherjunk))
args = dict()
# Fix the random seed
if options.fixRandomSeed:
random.seed('cs188')
# Choose a layout
args['layout'] = layout.getLayout(options.layout)
if args['layout'] == None:
raise Exception("The layout " + options.layout + " cannot be found")
# Choose a Pacman agent
noKeyboard = options.gameToReplay == None and (
options.textGraphics or options.quietGraphics)
pacmanType = loadAgent(options.pacman, noKeyboard)
agentOpts = parseAgentArgs(options.agentArgs)
if options.numTraining > 0:
args['numTraining'] = options.numTraining
if 'numTraining' not in agentOpts:
agentOpts['numTraining'] = options.numTraining
pacman = pacmanType(**agentOpts) # Instantiate Pacman with agentArgs
args['pacman'] = pacman
# Don't display training games
if 'numTrain' in agentOpts:
options.numQuiet = int(agentOpts['numTrain'])
options.numIgnore = int(agentOpts['numTrain'])
# Choose a ghost agent
ghostType = loadAgent(options.ghost, noKeyboard)
args['ghosts'] = [ghostType(i+1) for i in range(options.numGhosts)]
# Choose a display format
if options.quietGraphics:
import textDisplay
args['display'] = textDisplay.NullGraphics()
elif options.textGraphics:
import textDisplay
textDisplay.SLEEP_TIME = options.frameTime
args['display'] = textDisplay.PacmanGraphics()
else:
import graphicsDisplay
args['display'] = graphicsDisplay.PacmanGraphics(
options.zoom, frameTime=options.frameTime)
args['numGames'] = options.numGames
args['record'] = options.record
args['catchExceptions'] = options.catchExceptions
args['timeout'] = options.timeout
# Special case: recorded games don't use the runGames method or args structure
if options.gameToReplay != None:
print('Replaying recorded game %s.' % options.gameToReplay)
import pickle
        f = open(options.gameToReplay, 'rb')  # recorded games are pickled, so read in binary mode
try:
recorded = pickle.load(f)
finally:
f.close()
recorded['display'] = args['display']
replayGame(**recorded)
sys.exit(0)
return args
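# Illustrative call, using flags defined above:
#   readCommand(['-l', 'smallClassic', '-p', 'KeyboardAgent', '-k', '2', '-z', '1.5'])
# returns the keyword dict consumed by runGames(**args): layout, pacman, ghosts,
# display, numGames, record, catchExceptions and timeout.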
def loadAgent(pacman, nographics):
    # Looks through all PYTHONPATH directories for the right module.
pythonPathStr = os.path.expandvars("$PYTHONPATH")
if pythonPathStr.find(';') == -1:
pythonPathDirs = pythonPathStr.split(':')
else:
pythonPathDirs = pythonPathStr.split(';')
pythonPathDirs.append('.')
for moduleDir in pythonPathDirs:
if not os.path.isdir(moduleDir):
continue
moduleNames = [f for f in os.listdir(
moduleDir) if f.endswith('gents.py')]
for modulename in moduleNames:
try:
module = __import__(modulename[:-3])
except ImportError:
continue
if pacman in dir(module):
if nographics and modulename == 'keyboardAgents.py':
raise Exception(
'Using the keyboard requires graphics (not text display)')
return getattr(module, pacman)
raise Exception('The agent ' + pacman +
' is not specified in any *Agents.py.')
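# loadAgent searches every PYTHONPATH directory (plus '.') for modules whose
# names end in 'gents.py' -- e.g. pacmanAgents.py, ghostAgents.py,
# keyboardAgents.py -- and returns the first class matching the requested name.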
def replayGame(layout, actions, display):
import pacmanAgents
import ghostAgents
rules = ClassicGameRules()
agents = [pacmanAgents.GreedyAgent()] + [ghostAgents.RandomGhost(i+1)
for i in range(layout.getNumGhosts())]
game = rules.newGame(layout, agents[0], agents[1:], display)
state = game.state
display.initialize(state.data)
for action in actions:
# Execute the action
state = state.getNextState(*action)
# Change the display
display.update(state.data)
# Allow for game specific conditions (winning, losing, etc.)
rules.process(state, game)
display.finish()
def runGames(layout, pacman, ghosts, display, numGames, record, numTraining=0, catchExceptions=False, timeout=30):
import __main__
__main__.__dict__['_display'] = display
rules = ClassicGameRules(timeout)
games = []
for i in range(numGames):
beQuiet = i < numTraining
if beQuiet:
# Suppress output and graphics
import textDisplay
gameDisplay = textDisplay.NullGraphics()
rules.quiet = True
else:
gameDisplay = display
rules.quiet = False
game = rules.newGame(layout, pacman, ghosts,
gameDisplay, beQuiet, catchExceptions)
game.run()
if not beQuiet:
games.append(game)
if record:
import time
import pickle
fname = ('recorded-game-%d' % (i + 1)) + \
'-'.join([str(t) for t in time.localtime()[1:6]])
                f = open(fname, 'wb')  # pickle needs a binary file; file() no longer exists in Python 3
components = {'layout': layout, 'actions': game.moveHistory}
pickle.dump(components, f)
f.close()
if (numGames-numTraining) > 0:
scores = [game.state.getScore() for game in games]
wins = [game.state.isWin() for game in games]
winRate = wins.count(True) / float(len(wins))
print('Average Score:', sum(scores) / float(len(scores)))
print('Scores: ', ', '.join([str(score) for score in scores]))
print('Win Rate: %d/%d (%.2f)' %
(wins.count(True), len(wins), winRate))
print('Record: ', ', '.join(
[['Loss', 'Win'][int(w)] for w in wins]))
return games
if __name__ == '__main__':
"""
The main function called when pacman.py is run
from the command line:
> python pacman.py
See the usage string for more details.
> python pacman.py --help
"""
args = readCommand(sys.argv[1:]) # Get game components based on input
runGames(**args)
# import cProfile
# cProfile.run("runGames( **args )")
pass
| 37.109608 | 120 | 0.592656 |
from game import GameStateData
from game import Game
from game import Directions
from game import Actions
from util import nearestPoint
from util import manhattanDistance
import util
import layout
import sys
import types
import time
import random
import os
imer = staticmethod(decrementTimer)
def checkDeath(state, agentIndex):
pacmanPosition = state.getPacmanPosition()
if agentIndex == 0: # Pacman just moved; Anyone can kill him
for index in range(1, len(state.data.agentStates)):
ghostState = state.data.agentStates[index]
ghostPosition = ghostState.configuration.getPosition()
if GhostRules.canKill(pacmanPosition, ghostPosition):
GhostRules.collide(state, ghostState, index)
else:
ghostState = state.data.agentStates[agentIndex]
ghostPosition = ghostState.configuration.getPosition()
if GhostRules.canKill(pacmanPosition, ghostPosition):
GhostRules.collide(state, ghostState, agentIndex)
checkDeath = staticmethod(checkDeath)
def collide(state, ghostState, agentIndex):
if ghostState.scaredTimer > 0:
state.data.scoreChange += 200
GhostRules.placeGhost(state, ghostState)
ghostState.scaredTimer = 0
# Added for first-person
state.data._eaten[agentIndex] = True
else:
if not state.data._win:
state.data.scoreChange -= 500
state.data._lose = True
collide = staticmethod(collide)
def canKill(pacmanPosition, ghostPosition):
return manhattanDistance(ghostPosition, pacmanPosition) <= COLLISION_TOLERANCE
canKill = staticmethod(canKill)
def placeGhost(state, ghostState):
ghostState.configuration = ghostState.start
placeGhost = staticmethod(placeGhost)
#############################
# FRAMEWORK TO START A GAME #
#############################
def default(str):
return str + ' [Default: %default]'
def parseAgentArgs(str):
if str == None:
return {}
pieces = str.split(',')
opts = {}
for p in pieces:
if '=' in p:
key, val = p.split('=')
else:
key, val = p, 1
opts[key] = val
return opts
def readCommand(argv):
from optparse import OptionParser
usageStr = """
USAGE: python pacman.py <options>
EXAMPLES: (1) python pacman.py
- starts an interactive game
(2) python pacman.py --layout smallClassic --zoom 2
OR python pacman.py -l smallClassic -z 2
- starts an interactive game on a smaller board, zoomed in
"""
parser = OptionParser(usageStr)
parser.add_option('-n', '--numGames', dest='numGames', type='int',
help=default('the number of GAMES to play'), metavar='GAMES', default=1)
parser.add_option('-l', '--layout', dest='layout',
help=default(
'the LAYOUT_FILE from which to load the map layout'),
metavar='LAYOUT_FILE', default='mediumClassic')
parser.add_option('-p', '--pacman', dest='pacman',
help=default(
'the agent TYPE in the pacmanAgents module to use'),
metavar='TYPE', default='KeyboardAgent')
parser.add_option('-t', '--textGraphics', action='store_true', dest='textGraphics',
help='Display output as text only', default=False)
parser.add_option('-q', '--quietTextGraphics', action='store_true', dest='quietGraphics',
help='Generate minimal output and no graphics', default=False)
parser.add_option('-g', '--ghosts', dest='ghost',
help=default(
'the ghost agent TYPE in the ghostAgents module to use'),
metavar='TYPE', default='RandomGhost')
parser.add_option('-k', '--numghosts', type='int', dest='numGhosts',
help=default('The maximum number of ghosts to use'), default=4)
parser.add_option('-z', '--zoom', type='float', dest='zoom',
help=default('Zoom the size of the graphics window'), default=1.0)
parser.add_option('-f', '--fixRandomSeed', action='store_true', dest='fixRandomSeed',
help='Fixes the random seed to always play the same game', default=False)
parser.add_option('-r', '--recordActions', action='store_true', dest='record',
help='Writes game histories to a file (named by the time they were played)', default=False)
parser.add_option('--replay', dest='gameToReplay',
help='A recorded game file (pickle) to replay', default=None)
parser.add_option('-a', '--agentArgs', dest='agentArgs',
help='Comma separated values sent to agent. e.g. "opt1=val1,opt2,opt3=val3"')
parser.add_option('-x', '--numTraining', dest='numTraining', type='int',
help=default('How many episodes are training (suppresses output)'), default=0)
parser.add_option('--frameTime', dest='frameTime', type='float',
help=default('Time to delay between frames; <0 means keyboard'), default=0.1)
parser.add_option('-c', '--catchExceptions', action='store_true', dest='catchExceptions',
help='Turns on exception handling and timeouts during games', default=False)
parser.add_option('--timeout', dest='timeout', type='int',
help=default('Maximum length of time an agent can spend computing in a single game'), default=30)
options, otherjunk = parser.parse_args(argv)
if len(otherjunk) != 0:
raise Exception('Command line input not understood: ' + str(otherjunk))
args = dict()
# Fix the random seed
if options.fixRandomSeed:
random.seed('cs188')
# Choose a layout
args['layout'] = layout.getLayout(options.layout)
if args['layout'] == None:
raise Exception("The layout " + options.layout + " cannot be found")
# Choose a Pacman agent
noKeyboard = options.gameToReplay == None and (
options.textGraphics or options.quietGraphics)
pacmanType = loadAgent(options.pacman, noKeyboard)
agentOpts = parseAgentArgs(options.agentArgs)
if options.numTraining > 0:
args['numTraining'] = options.numTraining
if 'numTraining' not in agentOpts:
agentOpts['numTraining'] = options.numTraining
pacman = pacmanType(**agentOpts) # Instantiate Pacman with agentArgs
args['pacman'] = pacman
# Don't display training games
if 'numTrain' in agentOpts:
options.numQuiet = int(agentOpts['numTrain'])
options.numIgnore = int(agentOpts['numTrain'])
ghostType = loadAgent(options.ghost, noKeyboard)
args['ghosts'] = [ghostType(i+1) for i in range(options.numGhosts)]
if options.quietGraphics:
import textDisplay
args['display'] = textDisplay.NullGraphics()
elif options.textGraphics:
import textDisplay
textDisplay.SLEEP_TIME = options.frameTime
args['display'] = textDisplay.PacmanGraphics()
else:
import graphicsDisplay
args['display'] = graphicsDisplay.PacmanGraphics(
options.zoom, frameTime=options.frameTime)
args['numGames'] = options.numGames
args['record'] = options.record
args['catchExceptions'] = options.catchExceptions
args['timeout'] = options.timeout
if options.gameToReplay != None:
print('Replaying recorded game %s.' % options.gameToReplay)
import pickle
        f = open(options.gameToReplay, 'rb')
try:
recorded = pickle.load(f)
finally:
f.close()
recorded['display'] = args['display']
replayGame(**recorded)
sys.exit(0)
return args
def loadAgent(pacman, nographics):
    # Looks through all PYTHONPATH directories for the right module.
pythonPathStr = os.path.expandvars("$PYTHONPATH")
if pythonPathStr.find(';') == -1:
pythonPathDirs = pythonPathStr.split(':')
else:
pythonPathDirs = pythonPathStr.split(';')
pythonPathDirs.append('.')
for moduleDir in pythonPathDirs:
if not os.path.isdir(moduleDir):
continue
moduleNames = [f for f in os.listdir(
moduleDir) if f.endswith('gents.py')]
for modulename in moduleNames:
try:
module = __import__(modulename[:-3])
except ImportError:
continue
if pacman in dir(module):
if nographics and modulename == 'keyboardAgents.py':
raise Exception(
'Using the keyboard requires graphics (not text display)')
return getattr(module, pacman)
raise Exception('The agent ' + pacman +
' is not specified in any *Agents.py.')
def replayGame(layout, actions, display):
import pacmanAgents
import ghostAgents
rules = ClassicGameRules()
agents = [pacmanAgents.GreedyAgent()] + [ghostAgents.RandomGhost(i+1)
for i in range(layout.getNumGhosts())]
game = rules.newGame(layout, agents[0], agents[1:], display)
state = game.state
display.initialize(state.data)
for action in actions:
# Execute the action
state = state.getNextState(*action)
# Change the display
display.update(state.data)
# Allow for game specific conditions (winning, losing, etc.)
rules.process(state, game)
display.finish()
def runGames(layout, pacman, ghosts, display, numGames, record, numTraining=0, catchExceptions=False, timeout=30):
import __main__
__main__.__dict__['_display'] = display
rules = ClassicGameRules(timeout)
games = []
for i in range(numGames):
beQuiet = i < numTraining
if beQuiet:
# Suppress output and graphics
import textDisplay
gameDisplay = textDisplay.NullGraphics()
rules.quiet = True
else:
gameDisplay = display
rules.quiet = False
game = rules.newGame(layout, pacman, ghosts,
gameDisplay, beQuiet, catchExceptions)
game.run()
if not beQuiet:
games.append(game)
if record:
import time
import pickle
fname = ('recorded-game-%d' % (i + 1)) + \
'-'.join([str(t) for t in time.localtime()[1:6]])
                f = open(fname, 'wb')
components = {'layout': layout, 'actions': game.moveHistory}
pickle.dump(components, f)
f.close()
if (numGames-numTraining) > 0:
scores = [game.state.getScore() for game in games]
wins = [game.state.isWin() for game in games]
winRate = wins.count(True) / float(len(wins))
print('Average Score:', sum(scores) / float(len(scores)))
print('Scores: ', ', '.join([str(score) for score in scores]))
print('Win Rate: %d/%d (%.2f)' %
(wins.count(True), len(wins), winRate))
print('Record: ', ', '.join(
[['Loss', 'Win'][int(w)] for w in wins]))
return games
if __name__ == '__main__':
args = readCommand(sys.argv[1:]) # Get game components based on input
runGames(**args)
# import cProfile
# cProfile.run("runGames( **args )")
pass
| true | true |
f71f51cf95ba54a5f6398ad0ae300442232506f4 | 3,554 | py | Python | examples/Yellow_Sea/make_YELLOW_grd_v1.py | bilgetutak/pyroms | 3b0550f26f4ac181b7812e14a7167cd1ca0797f0 | [
"BSD-3-Clause"
] | 75 | 2016-04-05T07:15:57.000Z | 2022-03-04T22:49:54.000Z | examples/Yellow_Sea/make_YELLOW_grd_v1.py | hadfieldnz/pyroms-mgh | cd0fe39075825f97a7caf64e2c4c5a19f23302fd | [
"BSD-3-Clause"
] | 27 | 2017-02-26T04:27:49.000Z | 2021-12-01T17:26:56.000Z | examples/Yellow_Sea/make_YELLOW_grd_v1.py | hadfieldnz/pyroms-mgh | cd0fe39075825f97a7caf64e2c4c5a19f23302fd | [
"BSD-3-Clause"
] | 56 | 2016-05-11T06:19:14.000Z | 2022-03-22T19:04:17.000Z | import os
from pyroms import _iso
import numpy as np
from mpl_toolkits.basemap import Basemap, shiftgrid
from scipy.interpolate import griddata
import matplotlib.colors as colors
from scipy.signal import medfilt2d
import netCDF4
import pyroms
from bathy_smoother import *
# Grid dimension
Lm = 140
Mm = 120
lon0=117.5 ; lat0 = 41.
lon1=117.5 ; lat1 = 34.5
lon2 = 127. ; lat2 = 34.5
lon3 = 127. ; lat3 = 41.
map = Basemap(projection='lcc', lat_0=35., lat_1=30., lat_2=40, lon_0 =123, \
width=2000000, height=2000000, resolution='i')
lonp = np.array([lon0, lon1, lon2, lon3])
latp = np.array([lat0, lat1, lat2, lat3])
beta = np.array([1, 1, 1, 1])
#generate the new grid
# Do this if you aren't going to move the grid corners interactively.
hgrd = pyroms.grid.Gridgen(lonp, latp, beta, (Mm+3, Lm+3), proj=map)
# Do this if you are going to use the Boundary Interactor
#map.drawcoastlines()
#xp, yp = map(lonp, latp)
#bry = pyroms.hgrid.BoundaryInteractor(xp, yp, beta, shp=(Mm+3,Lm+3), proj=map)
#hgrd=bry.grd
lonv, latv = list(map(hgrd.x_vert, hgrd.y_vert, inverse=True))
hgrd = pyroms.grid.CGrid_geo(lonv, latv, map)
# generate the mask
#for verts in map.coastsegs:
# hgrd.mask_polygon(verts)
# alternate version from johan.navarro.padron
for xx,yy in map.coastpolygons:
xa = np.array(xx, np.float32)
ya = np.array(yy,np.float32)
vv = np.zeros((xa.shape[0],2))
vv[:, 0] = xa
vv[:, 1] = ya
hgrd.mask_polygon(vv,mask_value=0)
# Edit the land mask interactively.
#pyroms.grid.edit_mask_mesh(hgrd, proj=map)
#edit_mask_mesh_ij is a faster version using imshow... but no map projection.
coast = pyroms.utility.get_coast_from_map(map)
pyroms.grid.edit_mask_mesh_ij(hgrd, coast=coast)
#### Use the following to interpolate from etopo2 bathymetry.
# generate the bathy
# read in topo data (on a regular lat/lon grid)
# this topo comes with basemap so you should have it on your laptop.
# just update datadir with the appropriate path
# you can get this data from the matplotlib svn with
# svn co https://matplotlib.svn.sourceforge.net/svnroot/matplotlib/trunk/htdocs/screenshots/data/
datadir = 'data/'
topo = np.loadtxt(os.path.join(datadir, 'etopo20data.gz'))
lons = np.loadtxt(os.path.join(datadir, 'etopo20lons.gz'))
lats = np.loadtxt(os.path.join(datadir, 'etopo20lats.gz'))
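# Note: the etopo20* files hold global relief on a regular ~20-arc-minute
# lat/lon grid with elevations positive upward, hence the sign flip below.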
# depth positive
topo = -topo
# fix minimum depth
hmin = 5
topo = np.where(topo < hmin, hmin, topo)
# interpolate new bathymetry
lon, lat = np.meshgrid(lons, lats)
h = griddata((lon.flat,lat.flat),topo.flat,(hgrd.lon_rho,hgrd.lat_rho), method='linear')
# ensure that depth is always deeper than hmin
h = np.where(h < hmin, hmin, h)
# set depth to hmin where masked
idx = np.where(hgrd.mask_rho == 0)
h[idx] = hmin
# save raw bathymetry
hraw = h.copy()
# check bathymetry roughness
RoughMat = bathy_tools.RoughnessMatrix(h, hgrd.mask_rho)
print('Max Roughness value is: ', RoughMat.max())
# smooth the raw bathy using the direct iterative method from Martinho and Batteen (2006)
rx0_max = 0.35
h = bathy_smoothing.smoothing_Positive_rx0(hgrd.mask_rho, h, rx0_max)
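# rx0 is the slope factor |h1 - h2| / (h1 + h2) between adjacent wet cells;
# the smoother iteratively adjusts depths until rx0 <= rx0_max everywhere.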
# check bathymetry roughness again
RoughMat = bathy_tools.RoughnessMatrix(h, hgrd.mask_rho)
print('Max Roughness value is: ', RoughMat.max())
# vertical coordinate
theta_b = 2
theta_s = 7.0
Tcline = 50
N = 30
vgrd = pyroms.vgrid.s_coordinate_4(h, theta_b, theta_s, Tcline, N, hraw=hraw)
# ROMS grid
grd_name = 'YELLOW'
grd = pyroms.grid.ROMS_Grid(grd_name, hgrd, vgrd)
# write grid to netcdf file
pyroms.grid.write_ROMS_grid(grd, filename='YELLOW_grd_v1.nc')
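# The grid object written above combines the curvilinear horizontal grid (hgrd),
# the edited mask and smoothed bathymetry, and the vertical s-coordinate (vgrd)
# defined by theta_s, theta_b, Tcline and N.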
| 29.865546 | 98 | 0.728475 | import os
from pyroms import _iso
import numpy as np
from mpl_toolkits.basemap import Basemap, shiftgrid
from scipy.interpolate import griddata
import matplotlib.colors as colors
from scipy.signal import medfilt2d
import netCDF4
import pyroms
from bathy_smoother import *
Lm = 140
Mm = 120
lon0=117.5 ; lat0 = 41.
lon1=117.5 ; lat1 = 34.5
lon2 = 127. ; lat2 = 34.5
lon3 = 127. ; lat3 = 41.
map = Basemap(projection='lcc', lat_0=35., lat_1=30., lat_2=40, lon_0 =123, \
width=2000000, height=2000000, resolution='i')
lonp = np.array([lon0, lon1, lon2, lon3])
latp = np.array([lat0, lat1, lat2, lat3])
beta = np.array([1, 1, 1, 1])
hgrd = pyroms.grid.Gridgen(lonp, latp, beta, (Mm+3, Lm+3), proj=map)
# Do this if you are going to use the Boundary Interactor
#map.drawcoastlines()
#xp, yp = map(lonp, latp)
#bry = pyroms.hgrid.BoundaryInteractor(xp, yp, beta, shp=(Mm+3,Lm+3), proj=map)
#hgrd=bry.grd
lonv, latv = list(map(hgrd.x_vert, hgrd.y_vert, inverse=True))
hgrd = pyroms.grid.CGrid_geo(lonv, latv, map)
# generate the mask
#for verts in map.coastsegs:
# hgrd.mask_polygon(verts)
# alternate version from johan.navarro.padron
for xx,yy in map.coastpolygons:
xa = np.array(xx, np.float32)
ya = np.array(yy,np.float32)
vv = np.zeros((xa.shape[0],2))
vv[:, 0] = xa
vv[:, 1] = ya
hgrd.mask_polygon(vv,mask_value=0)
# Edit the land mask interactively.
#pyroms.grid.edit_mask_mesh(hgrd, proj=map)
#edit_mask_mesh_ij is a faster version using imshow... but no map projection.
coast = pyroms.utility.get_coast_from_map(map)
pyroms.grid.edit_mask_mesh_ij(hgrd, coast=coast)
#### Use the following to interpolate from etopo2 bathymetry.
# generate the bathy
# read in topo data (on a regular lat/lon grid)
# this topo comes with basemap so you should have it on your laptop.
# just update datadir with the appropriate path
# you can get this data from the matplotlib svn with
# svn co https://matplotlib.svn.sourceforge.net/svnroot/matplotlib/trunk/htdocs/screenshots/data/
datadir = 'data/'
topo = np.loadtxt(os.path.join(datadir, 'etopo20data.gz'))
lons = np.loadtxt(os.path.join(datadir, 'etopo20lons.gz'))
lats = np.loadtxt(os.path.join(datadir, 'etopo20lats.gz'))
# depth positive
topo = -topo
# fix minimum depth
hmin = 5
topo = np.where(topo < hmin, hmin, topo)
# interpolate new bathymetry
lon, lat = np.meshgrid(lons, lats)
h = griddata((lon.flat,lat.flat),topo.flat,(hgrd.lon_rho,hgrd.lat_rho), method='linear')
# ensure that depth is always deeper than hmin
h = np.where(h < hmin, hmin, h)
# set depth to hmin where masked
idx = np.where(hgrd.mask_rho == 0)
h[idx] = hmin
# save raw bathymetry
hraw = h.copy()
# check bathymetry roughness
RoughMat = bathy_tools.RoughnessMatrix(h, hgrd.mask_rho)
print('Max Roughness value is: ', RoughMat.max())
# smooth the raw bathy using the direct iterative method from Martinho and Batteen (2006)
rx0_max = 0.35
h = bathy_smoothing.smoothing_Positive_rx0(hgrd.mask_rho, h, rx0_max)
# check bathymetry roughness again
RoughMat = bathy_tools.RoughnessMatrix(h, hgrd.mask_rho)
print('Max Roughness value is: ', RoughMat.max())
# vertical coordinate
theta_b = 2
theta_s = 7.0
Tcline = 50
N = 30
vgrd = pyroms.vgrid.s_coordinate_4(h, theta_b, theta_s, Tcline, N, hraw=hraw)
# ROMS grid
grd_name = 'YELLOW'
grd = pyroms.grid.ROMS_Grid(grd_name, hgrd, vgrd)
# write grid to netcdf file
pyroms.grid.write_ROMS_grid(grd, filename='YELLOW_grd_v1.nc')
| true | true |
f71f51f2b02de08ee56c301cd81086d983759417 | 4,082 | py | Python | tests/config/test_config_provider.py | sturmianseq/thundra-agent-python | 4cee02d790eb7b8e4dea4e2e9dcd1f67533b1c56 | [
"Apache-2.0"
] | 22 | 2018-03-05T20:02:46.000Z | 2021-04-09T12:00:18.000Z | tests/config/test_config_provider.py | sturmianseq/thundra-agent-python | 4cee02d790eb7b8e4dea4e2e9dcd1f67533b1c56 | [
"Apache-2.0"
] | 13 | 2018-03-26T07:57:57.000Z | 2021-06-29T14:22:52.000Z | tests/config/test_config_provider.py | thundra-io/thundra-agent-python | 448e18c17d8730c381b2e2a773782cf80c5a7cfb | [
"Apache-2.0"
] | 3 | 2021-08-07T14:19:23.000Z | 2021-12-08T15:35:40.000Z | import os
import pytest
from thundra.config.config_provider import ConfigProvider
@pytest.fixture()
def config_options():
return {
'config': {
'my': {
'key': 'my-value'
},
'lambda': {
'my': {
'key2': 'my-value2'
}
},
'thundra': {
'agent': {
'my': {
'key3': 'my-value3'
},
'lambda': {
'my': {
'key4': 'my-value4'
}
}
}
}
}
}
@pytest.fixture()
def options_with_different_type():
return {
'config': {
'thundra': {
'agent': {
'application': {
'className': 'TEST'
},
'debug': {
'enable': True
},
'lambda': {
'debugger.broker.port': 444
}
}
}
}
}
def test_config_from_environment_variable(monkeypatch):
monkeypatch.setitem(os.environ, 'THUNDRA_AGENT_TEST_KEY', 'test_value')
monkeypatch.setitem(os.environ, 'THUNDRA_AGENT_LAMBDA_TEST_KEY2', 'test_value2')
ConfigProvider.__init__()
monkeypatch.delitem(os.environ, 'THUNDRA_AGENT_TEST_KEY')
monkeypatch.delitem(os.environ, 'THUNDRA_AGENT_LAMBDA_TEST_KEY2')
assert ConfigProvider.get('thundra.agent.test.key') == 'test_value'
assert ConfigProvider.get('thundra.agent.lambda.test.key2') == 'test_value2'
assert ConfigProvider.get('THUNDRA_AGENT_TEST_KEY') is None
assert ConfigProvider.get('THUNDRA_AGENT_LAMBDA_TEST_KEY2') is None
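# The two asserts above also document the lookup convention: environment
# variables are exposed as lower-case dotted keys (THUNDRA_AGENT_TEST_KEY ->
# thundra.agent.test.key), so the raw variable name itself resolves to None.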
def test_config_from_options(config_options):
ConfigProvider.__init__(options=config_options)
assert ConfigProvider.get('thundra.agent.my.key') == 'my-value'
assert ConfigProvider.get('thundra.agent.lambda.my.key2') == 'my-value2'
assert ConfigProvider.get('thundra.agent.my.key3') == 'my-value3'
assert ConfigProvider.get('thundra.agent.lambda.my.key4') == 'my-value4'
assert ConfigProvider.get('thundra.agent.my.key2') == 'my-value2'
assert ConfigProvider.get('thundra.agent.my.key4') == 'my-value4'
assert ConfigProvider.get('thundra.agent.my.key5') is None
def test_config_environment_variable_override_options(monkeypatch, config_options):
monkeypatch.setitem(os.environ, 'THUNDRA_AGENT_MY_KEY', 'my_value_from_env')
monkeypatch.setitem(os.environ, 'THUNDRA_AGENT_LAMBDA_MY_KEY2', 'my_value_from_env2')
ConfigProvider.__init__(options=config_options)
assert ConfigProvider.get('thundra.agent.my.key') == 'my_value_from_env'
assert ConfigProvider.get('thundra.agent.lambda.my.key2') == 'my_value_from_env2'
assert ConfigProvider.get('thundra.agent.my.key2') == 'my_value_from_env2'
def test_config_variable_correct_type(monkeypatch, options_with_different_type):
monkeypatch.setitem(os.environ, 'thundra_agent_lambda_debugger_port', '3000')
monkeypatch.setitem(os.environ, 'thundra_agent_trace_integrations_aws_dynamodb_traceInjection_enable', 'true')
ConfigProvider.__init__(options=options_with_different_type)
assert ConfigProvider.get('thundra.agent.lambda.debugger.port') == 3000
assert ConfigProvider.get('thundra.agent.trace.integrations.aws.dynamodb.traceinjection.enable') is True
assert ConfigProvider.get('thundra.agent.lambda.debugger.broker.port') == 444
assert ConfigProvider.get('thundra.agent.application.classname') == 'TEST'
assert ConfigProvider.get('thundra.agent.debug.enable') is True
def test_config_correct_default_value():
ConfigProvider.__init__()
assert ConfigProvider.get('thundra.agent.debug.enable') is False
assert ConfigProvider.get('thundra.agent.debug.enable', True) is True
assert ConfigProvider.get('thundra.agent.lambda.debugger.logs.enable') is False
| 34.888889 | 114 | 0.629838 | import os
import pytest
from thundra.config.config_provider import ConfigProvider
@pytest.fixture()
def config_options():
return {
'config': {
'my': {
'key': 'my-value'
},
'lambda': {
'my': {
'key2': 'my-value2'
}
},
'thundra': {
'agent': {
'my': {
'key3': 'my-value3'
},
'lambda': {
'my': {
'key4': 'my-value4'
}
}
}
}
}
}
@pytest.fixture()
def options_with_different_type():
return {
'config': {
'thundra': {
'agent': {
'application': {
'className': 'TEST'
},
'debug': {
'enable': True
},
'lambda': {
'debugger.broker.port': 444
}
}
}
}
}
def test_config_from_environment_variable(monkeypatch):
monkeypatch.setitem(os.environ, 'THUNDRA_AGENT_TEST_KEY', 'test_value')
monkeypatch.setitem(os.environ, 'THUNDRA_AGENT_LAMBDA_TEST_KEY2', 'test_value2')
ConfigProvider.__init__()
monkeypatch.delitem(os.environ, 'THUNDRA_AGENT_TEST_KEY')
monkeypatch.delitem(os.environ, 'THUNDRA_AGENT_LAMBDA_TEST_KEY2')
assert ConfigProvider.get('thundra.agent.test.key') == 'test_value'
assert ConfigProvider.get('thundra.agent.lambda.test.key2') == 'test_value2'
assert ConfigProvider.get('THUNDRA_AGENT_TEST_KEY') is None
assert ConfigProvider.get('THUNDRA_AGENT_LAMBDA_TEST_KEY2') is None
def test_config_from_options(config_options):
ConfigProvider.__init__(options=config_options)
assert ConfigProvider.get('thundra.agent.my.key') == 'my-value'
assert ConfigProvider.get('thundra.agent.lambda.my.key2') == 'my-value2'
assert ConfigProvider.get('thundra.agent.my.key3') == 'my-value3'
assert ConfigProvider.get('thundra.agent.lambda.my.key4') == 'my-value4'
assert ConfigProvider.get('thundra.agent.my.key2') == 'my-value2'
assert ConfigProvider.get('thundra.agent.my.key4') == 'my-value4'
assert ConfigProvider.get('thundra.agent.my.key5') is None
def test_config_environment_variable_override_options(monkeypatch, config_options):
monkeypatch.setitem(os.environ, 'THUNDRA_AGENT_MY_KEY', 'my_value_from_env')
monkeypatch.setitem(os.environ, 'THUNDRA_AGENT_LAMBDA_MY_KEY2', 'my_value_from_env2')
ConfigProvider.__init__(options=config_options)
assert ConfigProvider.get('thundra.agent.my.key') == 'my_value_from_env'
assert ConfigProvider.get('thundra.agent.lambda.my.key2') == 'my_value_from_env2'
assert ConfigProvider.get('thundra.agent.my.key2') == 'my_value_from_env2'
def test_config_variable_correct_type(monkeypatch, options_with_different_type):
monkeypatch.setitem(os.environ, 'thundra_agent_lambda_debugger_port', '3000')
monkeypatch.setitem(os.environ, 'thundra_agent_trace_integrations_aws_dynamodb_traceInjection_enable', 'true')
ConfigProvider.__init__(options=options_with_different_type)
assert ConfigProvider.get('thundra.agent.lambda.debugger.port') == 3000
assert ConfigProvider.get('thundra.agent.trace.integrations.aws.dynamodb.traceinjection.enable') is True
assert ConfigProvider.get('thundra.agent.lambda.debugger.broker.port') == 444
assert ConfigProvider.get('thundra.agent.application.classname') == 'TEST'
assert ConfigProvider.get('thundra.agent.debug.enable') is True
def test_config_correct_default_value():
ConfigProvider.__init__()
assert ConfigProvider.get('thundra.agent.debug.enable') is False
assert ConfigProvider.get('thundra.agent.debug.enable', True) is True
assert ConfigProvider.get('thundra.agent.lambda.debugger.logs.enable') is False
| true | true |
f71f521b683a7942f71c9124e2203f4da258ee2b | 4,799 | py | Python | tests/test_optimalK.py | alinaselega/gap_statistic | 2b94c46b676eef839f7709441a89bdc5796b2d31 | [
"MIT",
"Unlicense"
] | 132 | 2016-11-01T07:08:21.000Z | 2022-03-30T13:41:31.000Z | tests/test_optimalK.py | alinaselega/gap_statistic | 2b94c46b676eef839f7709441a89bdc5796b2d31 | [
"MIT",
"Unlicense"
] | 37 | 2016-10-18T12:18:35.000Z | 2022-02-23T04:22:19.000Z | tests/test_optimalK.py | alinaselega/gap_statistic | 2b94c46b676eef839f7709441a89bdc5796b2d31 | [
"MIT",
"Unlicense"
] | 43 | 2017-01-08T18:35:45.000Z | 2022-02-17T14:07:20.000Z | # -*- coding: utf-8 -*-
import os
import pytest
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans, MeanShift
from gap_statistic import OptimalK
def test_bad_init_config():
"""
Cannot define own clustering function and try to use Rust backend
"""
with pytest.raises(ValueError):
OptimalK(parallel_backend="rust", clusterer=lambda x, k: print("just testing"))
@pytest.mark.parametrize("ClusterModel", [KMeans, MeanShift])
def test_alternative_clusting_method(ClusterModel):
"""
Test that users can supply alternative clustering method as dep injection
"""
def clusterer(X: np.ndarray, k: int, another_test_arg):
"""
Function to wrap a sklearn model as a clusterer for OptimalK
First two arguments are always the data matrix, and k, and can supply
"""
m = ClusterModel()
m.fit(X)
assert another_test_arg == "test"
return m.cluster_centers_, m.predict(X)
optimalk = OptimalK(
n_jobs=-1,
parallel_backend="joblib",
clusterer=clusterer,
clusterer_kwargs={"another_test_arg": "test"},
)
X, y = make_blobs(n_samples=50, n_features=2, centers=3)
n_clusters = optimalk(X, n_refs=3, cluster_array=np.arange(1, 5))
assert isinstance(n_clusters, int)
@pytest.mark.parametrize(
"parallel_backend, n_jobs, n_clusters",
[
pytest.param(
"joblib", 1, 3, id="parallel_backend='joblib', n_jobs=1, n_clusters=3"
),
pytest.param(None, 1, 3, id="parallel_backend=None, n_jobs=1, n_clusters=3"),
# TODO: Add back this test param in rust side extension
# pytest.param(
# "rust", 1, 3, id="parallel_backend='rust', n_jobs=1, n_clusters=3"
# ),
],
)
def test_optimalk(parallel_backend, n_jobs, n_clusters):
"""
Test core functionality of OptimalK using all backends.
"""
# Create optimalK instance
optimalK = OptimalK(parallel_backend=parallel_backend, n_jobs=n_jobs)
# Create data
X, y = make_blobs(n_samples=int(1e3), n_features=2, centers=3)
suggested_clusters = optimalK(X, n_refs=3, cluster_array=np.arange(1, 10))
assert np.allclose(
suggested_clusters, n_clusters, 2
), "Correct clusters is {}, OptimalK suggested {}".format(
n_clusters, suggested_clusters
)
@pytest.mark.skipif(
"TEST_RUST_EXT" not in os.environ, reason="Rust extension not built."
)
def test_optimalk_rust_ext():
"""
Test core functionality of OptimalK using all backends.
"""
# Create optimalK instance
optimalK = OptimalK(parallel_backend="rust", n_jobs=1)
# Create data
X, y = make_blobs(n_samples=int(1e3), n_features=2, centers=3)
suggested_clusters = optimalK(X, n_refs=3, cluster_array=np.arange(1, 10))
assert np.allclose(
suggested_clusters, 3, 2
), "Correct clusters is {}, OptimalK suggested {}".format(3, suggested_clusters)
def test_optimalk_cluster_array_vs_data_sizes_error():
"""
Test ValueError when cluster_array is larger than dataset.
"""
import numpy as np
from gap_statistic import OptimalK
# Create optimalK instance
optimalK = OptimalK(parallel_backend=None, n_jobs=-1)
# Create data
X, y = make_blobs(n_samples=5, n_features=2, centers=3)
with pytest.raises(ValueError) as excinfo:
optimalK(X, cluster_array=np.arange(1, 10))
assert "The number of suggested clusters to try" in str(excinfo.value)
def test_optimalk_cluster_array_values_error():
"""
Test ValueError when cluster_array contains values less than 1
"""
from gap_statistic import OptimalK
# Create optimalK instance
optimalK = OptimalK(parallel_backend=None, n_jobs=-1)
# Create data
X, y = make_blobs(n_samples=int(1e3), n_features=2, centers=3)
with pytest.raises(ValueError) as excinfo:
optimalK(X, cluster_array=[0, -1, 1, 2, 3])
assert "cluster_array contains values less than 1" in str(excinfo.value)
def test_optimalk_cluster_array_empty_error():
"""
Test ValueError when cluster_array is empty.
"""
from gap_statistic import OptimalK
# Create optimalK instance
optimalK = OptimalK(parallel_backend=None, n_jobs=-1)
# Create data
X, y = make_blobs(n_samples=int(1e3), n_features=2, centers=3)
with pytest.raises(ValueError) as excinfo:
optimalK(X, cluster_array=[])
assert "The supplied cluster_array has no values." in str(excinfo.value)
def test_dunders():
"""
Test that implemented dunder methods don't return errors
"""
from gap_statistic import OptimalK
optimalK = OptimalK()
optimalK.__str__()
optimalK.__repr__()
optimalK._repr_html_()
| 29.441718 | 87 | 0.681184 |
import os
import pytest
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans, MeanShift
from gap_statistic import OptimalK
def test_bad_init_config():
with pytest.raises(ValueError):
OptimalK(parallel_backend="rust", clusterer=lambda x, k: print("just testing"))
@pytest.mark.parametrize("ClusterModel", [KMeans, MeanShift])
def test_alternative_clusting_method(ClusterModel):
def clusterer(X: np.ndarray, k: int, another_test_arg):
m = ClusterModel()
m.fit(X)
assert another_test_arg == "test"
return m.cluster_centers_, m.predict(X)
optimalk = OptimalK(
n_jobs=-1,
parallel_backend="joblib",
clusterer=clusterer,
clusterer_kwargs={"another_test_arg": "test"},
)
X, y = make_blobs(n_samples=50, n_features=2, centers=3)
n_clusters = optimalk(X, n_refs=3, cluster_array=np.arange(1, 5))
assert isinstance(n_clusters, int)
@pytest.mark.parametrize(
"parallel_backend, n_jobs, n_clusters",
[
pytest.param(
"joblib", 1, 3, id="parallel_backend='joblib', n_jobs=1, n_clusters=3"
),
pytest.param(None, 1, 3, id="parallel_backend=None, n_jobs=1, n_clusters=3"),
],
)
def test_optimalk(parallel_backend, n_jobs, n_clusters):
optimalK = OptimalK(parallel_backend=parallel_backend, n_jobs=n_jobs)
X, y = make_blobs(n_samples=int(1e3), n_features=2, centers=3)
suggested_clusters = optimalK(X, n_refs=3, cluster_array=np.arange(1, 10))
assert np.allclose(
suggested_clusters, n_clusters, 2
), "Correct clusters is {}, OptimalK suggested {}".format(
n_clusters, suggested_clusters
)
@pytest.mark.skipif(
"TEST_RUST_EXT" not in os.environ, reason="Rust extension not built."
)
def test_optimalk_rust_ext():
optimalK = OptimalK(parallel_backend="rust", n_jobs=1)
X, y = make_blobs(n_samples=int(1e3), n_features=2, centers=3)
suggested_clusters = optimalK(X, n_refs=3, cluster_array=np.arange(1, 10))
assert np.allclose(
suggested_clusters, 3, 2
), "Correct clusters is {}, OptimalK suggested {}".format(3, suggested_clusters)
def test_optimalk_cluster_array_vs_data_sizes_error():
import numpy as np
from gap_statistic import OptimalK
optimalK = OptimalK(parallel_backend=None, n_jobs=-1)
X, y = make_blobs(n_samples=5, n_features=2, centers=3)
with pytest.raises(ValueError) as excinfo:
optimalK(X, cluster_array=np.arange(1, 10))
assert "The number of suggested clusters to try" in str(excinfo.value)
def test_optimalk_cluster_array_values_error():
from gap_statistic import OptimalK
optimalK = OptimalK(parallel_backend=None, n_jobs=-1)
X, y = make_blobs(n_samples=int(1e3), n_features=2, centers=3)
with pytest.raises(ValueError) as excinfo:
optimalK(X, cluster_array=[0, -1, 1, 2, 3])
assert "cluster_array contains values less than 1" in str(excinfo.value)
def test_optimalk_cluster_array_empty_error():
from gap_statistic import OptimalK
optimalK = OptimalK(parallel_backend=None, n_jobs=-1)
X, y = make_blobs(n_samples=int(1e3), n_features=2, centers=3)
with pytest.raises(ValueError) as excinfo:
optimalK(X, cluster_array=[])
assert "The supplied cluster_array has no values." in str(excinfo.value)
def test_dunders():
from gap_statistic import OptimalK
optimalK = OptimalK()
optimalK.__str__()
optimalK.__repr__()
optimalK._repr_html_()
| true | true |
f71f533ceeca3968a0d37a1a87b62202c911fd86 | 11,743 | py | Python | samples/openapi3/client/features/dynamic-servers/python/dynamic_servers/api/usage_api.py | JigarJoshi/openapi-generator | 785535b8d6881b358463994823abbda2b26ff42e | [
"Apache-2.0"
] | 1 | 2022-01-24T08:22:21.000Z | 2022-01-24T08:22:21.000Z | samples/openapi3/client/features/dynamic-servers/python/dynamic_servers/api/usage_api.py | JigarJoshi/openapi-generator | 785535b8d6881b358463994823abbda2b26ff42e | [
"Apache-2.0"
] | 4 | 2021-09-29T08:46:32.000Z | 2021-12-08T09:07:04.000Z | samples/openapi3/client/features/dynamic-servers/python/dynamic_servers/api/usage_api.py | JigarJoshi/openapi-generator | 785535b8d6881b358463994823abbda2b26ff42e | [
"Apache-2.0"
] | 1 | 2022-02-24T15:54:44.000Z | 2022-02-24T15:54:44.000Z | """
OpenAPI Extension with dynamic servers
This specification shows how to use dynamic servers. # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from dynamic_servers.api_client import ApiClient, Endpoint as _Endpoint
from dynamic_servers.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
class UsageApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.custom_server_endpoint = _Endpoint(
settings={
'response_type': ({str: (bool, date, datetime, dict, float, int, list, str, none_type)},),
'auth': [],
'endpoint_path': '/custom',
'operation_id': 'custom_server',
'http_method': 'GET',
'servers': [
{
'url': "https://{server}.swagger.io:{port}/v2",
'description': "No description provided",
'variables': {
'server': {
'description': "No description provided",
'default_value': "custom-petstore",
'enum_values': [
"custom-petstore",
"custom-qa-petstore",
"custom-dev-petstore"
]
},
'port': {
'description': "No description provided",
'default_value': "8080",
'enum_values': [
"80",
"8080"
]
}
}
},
{
'url': "https://localhost:8081/{version}",
'description': "The local custom server",
'variables': {
'version': {
'description': "No description provided",
'default_value': "v2",
'enum_values': [
"v1",
"v2",
"v3"
]
}
}
},
{
'url': "https://third.example.com/{prefix}",
'description': "The local custom server",
'variables': {
'prefix': {
'description': "No description provided",
'default_value': "custom",
}
}
},
]
},
params_map={
'all': [
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.default_server_endpoint = _Endpoint(
settings={
'response_type': ({str: (bool, date, datetime, dict, float, int, list, str, none_type)},),
'auth': [],
'endpoint_path': '/default',
'operation_id': 'default_server',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
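    # Note the difference between the two endpoints configured above:
    # custom_server_endpoint carries its own 'servers' list of templated URLs
    # (variables: server, port, version, prefix), while default_server_endpoint
    # has 'servers': None and therefore falls back to the client configuration
    # (see the _host_index keyword documented in the methods below).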
def custom_server(
self,
**kwargs
):
"""Use custom server # noqa: E501
Use custom server # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.custom_server(async_req=True)
>>> result = thread.get()
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
{str: (bool, date, datetime, dict, float, int, list, str, none_type)}
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
return self.custom_server_endpoint.call_with_http_info(**kwargs)
def default_server(
self,
**kwargs
):
"""Use default server # noqa: E501
Use default server # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.default_server(async_req=True)
>>> result = thread.get()
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (int/float/tuple): timeout setting for this request. If
one number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
                should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
                should be done on the data received from the server.
Default is True.
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_content_type (str/None): force body content-type.
Default is None and content-type will be predicted by allowed
content-types and body.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
{str: (bool, date, datetime, dict, float, int, list, str, none_type)}
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
return self.default_server_endpoint.call_with_http_info(**kwargs)
| 37.044164 | 106 | 0.471941 |
import re
import sys
from dynamic_servers.api_client import ApiClient, Endpoint as _Endpoint
from dynamic_servers.model_utils import (
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types
)
class UsageApi(object):
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
self.custom_server_endpoint = _Endpoint(
settings={
'response_type': ({str: (bool, date, datetime, dict, float, int, list, str, none_type)},),
'auth': [],
'endpoint_path': '/custom',
'operation_id': 'custom_server',
'http_method': 'GET',
'servers': [
{
'url': "https://{server}.swagger.io:{port}/v2",
'description': "No description provided",
'variables': {
'server': {
'description': "No description provided",
'default_value': "custom-petstore",
'enum_values': [
"custom-petstore",
"custom-qa-petstore",
"custom-dev-petstore"
]
},
'port': {
'description': "No description provided",
'default_value': "8080",
'enum_values': [
"80",
"8080"
]
}
}
},
{
'url': "https://localhost:8081/{version}",
'description': "The local custom server",
'variables': {
'version': {
'description': "No description provided",
'default_value': "v2",
'enum_values': [
"v1",
"v2",
"v3"
]
}
}
},
{
'url': "https://third.example.com/{prefix}",
'description': "The local custom server",
'variables': {
'prefix': {
'description': "No description provided",
'default_value': "custom",
}
}
},
]
},
params_map={
'all': [
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
self.default_server_endpoint = _Endpoint(
settings={
'response_type': ({str: (bool, date, datetime, dict, float, int, list, str, none_type)},),
'auth': [],
'endpoint_path': '/default',
'operation_id': 'default_server',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
],
'required': [],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
},
'attribute_map': {
},
'location_map': {
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [],
},
api_client=api_client
)
def custom_server(
self,
**kwargs
):
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
return self.custom_server_endpoint.call_with_http_info(**kwargs)
def default_server(
self,
**kwargs
):
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_spec_property_naming'] = kwargs.get(
'_spec_property_naming', False
)
kwargs['_content_type'] = kwargs.get(
'_content_type')
kwargs['_host_index'] = kwargs.get('_host_index')
return self.default_server_endpoint.call_with_http_info(**kwargs)
| true | true |
f71f559efb8f3cc65c106ea9756849f94c18c509 | 1,170 | py | Python | test/test_main.py | LucaMarconato/phyper | 065f41dbdce93b95cd2f8a16ad72a1cf57826c66 | [
"MIT"
] | 1 | 2020-08-14T07:40:18.000Z | 2020-08-14T07:40:18.000Z | test/test_main.py | LucaMarconato/phyper | 065f41dbdce93b95cd2f8a16ad72a1cf57826c66 | [
"MIT"
] | null | null | null | test/test_main.py | LucaMarconato/phyper | 065f41dbdce93b95cd2f8a16ad72a1cf57826c66 | [
"MIT"
] | null | null | null | import phyper
from typing import List
from pprint import pprint
import pandas as pd
class NonKeys:
n_epochs = 11
batch_size = 10
resume_training = False
another_non_key = True
class MyParser(phyper.Parser, NonKeys):
my_testa: str = 1
ehi = None
bbbbb = 32
c = 'ehi'
hashed_resources_folder = 'hashed_resources'
my_parser = MyParser(hashed_resources_folder)
my_parser.register_new_resource(name='normalizer', dependencies=['my_testa', 'ehi', 'bbbbb'])
print(my_parser.get_hyperparameters())
print(my_parser.get_hashable_hyperparameters())
my_instance = my_parser.new_instance()
my_instance.get_instance_hash()
print(my_instance.get_hyperparameters())
print(my_instance.get_hashable_hyperparameters())
print(my_instance.get_instance_hash())
print(my_instance.get_instance_hash('normalizer'))
# print(my_instance.get_instance_hash('c'))
print(my_instance.get_resources_path())
print(my_instance.get_resources_path('normalizer'))
d = {'n_epochs': [50], 'c': ['c0', 'c1'], 'my_testa': [1, 2, 3]}
instances: List[MyParser] = my_parser.get_instances_from_dictionary(d)
for instance in instances:
print(instance.get_instance_hash())
| 27.857143 | 93 | 0.766667 | import phyper
from typing import List
from pprint import pprint
import pandas as pd
class NonKeys:
n_epochs = 11
batch_size = 10
resume_training = False
another_non_key = True
class MyParser(phyper.Parser, NonKeys):
my_testa: str = 1
ehi = None
bbbbb = 32
c = 'ehi'
hashed_resources_folder = 'hashed_resources'
my_parser = MyParser(hashed_resources_folder)
my_parser.register_new_resource(name='normalizer', dependencies=['my_testa', 'ehi', 'bbbbb'])
print(my_parser.get_hyperparameters())
print(my_parser.get_hashable_hyperparameters())
my_instance = my_parser.new_instance()
my_instance.get_instance_hash()
print(my_instance.get_hyperparameters())
print(my_instance.get_hashable_hyperparameters())
print(my_instance.get_instance_hash())
print(my_instance.get_instance_hash('normalizer'))
print(my_instance.get_resources_path())
print(my_instance.get_resources_path('normalizer'))
d = {'n_epochs': [50], 'c': ['c0', 'c1'], 'my_testa': [1, 2, 3]}
instances: List[MyParser] = my_parser.get_instances_from_dictionary(d)
for instance in instances:
print(instance.get_instance_hash())
| true | true |
f71f55c54252740d7984c8598467133969e771fe | 1,091 | py | Python | motion_primitives_py/motion_primitives_py/examples/dispersion_algorithm_animation.py | ljarin/dispersion_motion_planning | 1c16c95b70915e58e407c1a45aa4065877fbb3de | [
"BSD-3-Clause"
] | 1 | 2022-03-04T12:03:26.000Z | 2022-03-04T12:03:26.000Z | motion_primitives_py/motion_primitives_py/examples/dispersion_algorithm_animation.py | ljarin/dispersion_motion_planning | 1c16c95b70915e58e407c1a45aa4065877fbb3de | [
"BSD-3-Clause"
] | null | null | null | motion_primitives_py/motion_primitives_py/examples/dispersion_algorithm_animation.py | ljarin/dispersion_motion_planning | 1c16c95b70915e58e407c1a45aa4065877fbb3de | [
"BSD-3-Clause"
] | null | null | null | # %%
from motion_primitives_py import *
import numpy as np
import time
from pycallgraph import PyCallGraph, Config
from pycallgraph.output import GraphvizOutput
"""
Animate the evolution of the min. dispersion algorithm
"""
tiling = True
plot = False
animate = True
check_backwards_dispersion = False
mp_subclass_specific_data = {}
# %%
# define parameters
control_space_q = 2
num_dims = 2
max_state = [3.5, 2*np.pi]
motion_primitive_type = ReedsSheppMotionPrimitive
# resolution = [.51, .5]
num_dense_samples = 100
# # # %%
# motion_primitive_type = PolynomialMotionPrimitive
# control_space_q = 2
# num_dims = 2
# max_state = [3.51, 1.51, 10, 100]
# mp_subclass_specific_data = {'iterative_bvp_dt': .1, 'iterative_bvp_max_t': 5, 'rho': 10}
# num_dense_samples = 200
# %%
# build lattice
mpl = MotionPrimitiveLattice(control_space_q, num_dims, max_state, motion_primitive_type, tiling, False, mp_subclass_specific_data)
mpl.compute_min_dispersion_space(
num_output_pts=10, check_backwards_dispersion=check_backwards_dispersion, animate=animate, num_dense_samples=num_dense_samples)
| 27.974359 | 131 | 0.781852 |
from motion_primitives_py import *
import numpy as np
import time
from pycallgraph import PyCallGraph, Config
from pycallgraph.output import GraphvizOutput
tiling = True
plot = False
animate = True
check_backwards_dispersion = False
mp_subclass_specific_data = {}
control_space_q = 2
num_dims = 2
max_state = [3.5, 2*np.pi]
motion_primitive_type = ReedsSheppMotionPrimitive
num_dense_samples = 100
mpl = MotionPrimitiveLattice(control_space_q, num_dims, max_state, motion_primitive_type, tiling, False, mp_subclass_specific_data)
mpl.compute_min_dispersion_space(
num_output_pts=10, check_backwards_dispersion=check_backwards_dispersion, animate=animate, num_dense_samples=num_dense_samples)
| true | true |
f71f573d416e2f35d92d643b1b9835d4b1c1c202 | 13,297 | py | Python | KiBuzzard/buzzard/modules/svgstring2path.py | HDR/KiBuzzard | b9e2cff0783b7cda9b8d68f2d2b5077b48d3a838 | [
"MIT"
] | 240 | 2021-01-11T14:49:24.000Z | 2022-03-29T22:33:49.000Z | KiBuzzard/buzzard/modules/svgstring2path.py | HDR/KiBuzzard | b9e2cff0783b7cda9b8d68f2d2b5077b48d3a838 | [
"MIT"
] | 77 | 2021-01-12T20:23:30.000Z | 2022-03-28T12:14:34.000Z | KiBuzzard/buzzard/modules/svgstring2path.py | HDR/KiBuzzard | b9e2cff0783b7cda9b8d68f2d2b5077b48d3a838 | [
"MIT"
] | 28 | 2021-01-17T05:44:11.000Z | 2022-01-11T19:58:46.000Z | # This is a conglomeration of modules removed from https://github.com/mathandy/svgpathtools
# in order to support a modified 'svg2paths' method called 'string2paths' which takes an
# svg string as an argument instead of a filename.
from svgpathtools import Line, QuadraticBezier, CubicBezier, Path, Arc
from xml.dom.minidom import parseString
import warnings
import re
try:
str = basestring
except NameError:
pass
COMMANDS = set('MmZzLlHhVvCcSsQqTtAa')
UPPERCASE = set('MZLHVCSQTA')
COMMAND_RE = re.compile("([MmZzLlHhVvCcSsQqTtAa])")
FLOAT_RE = re.compile(r"[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?")  # raw string: avoids the invalid '\.' escape warning
COORD_PAIR_TMPLT = re.compile(
r'([\+-]?\d*[\.\d]\d*[eE][\+-]?\d+|[\+-]?\d*[\.\d]\d*)' +
r'(?:\s*,\s*|\s+|(?=-))' +
r'([\+-]?\d*[\.\d]\d*[eE][\+-]?\d+|[\+-]?\d*[\.\d]\d*)'
)
def path2pathd(path):
return path.get('d', '')
def ellipse2pathd(ellipse):
"""converts the parameters from an ellipse or a circle to a string for a
Path object d-attribute"""
cx = ellipse.get('cx', 0)
cy = ellipse.get('cy', 0)
rx = ellipse.get('rx', None)
ry = ellipse.get('ry', None)
r = ellipse.get('r', None)
if r is not None:
rx = ry = float(r)
else:
rx = float(rx)
ry = float(ry)
cx = float(cx)
cy = float(cy)
d = ''
d += 'M' + str(cx - rx) + ',' + str(cy)
d += 'a' + str(rx) + ',' + str(ry) + ' 0 1,0 ' + str(2 * rx) + ',0'
d += 'a' + str(rx) + ',' + str(ry) + ' 0 1,0 ' + str(-2 * rx) + ',0'
return d
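# Editor's example (not in the original svgpathtools code): a plain circle is
# rendered as two half-circle arcs, e.g.
#   >>> ellipse2pathd({'cx': '10', 'cy': '10', 'r': '5'})
#   'M5.0,10.0a5.0,5.0 0 1,0 10.0,0a5.0,5.0 0 1,0 -10.0,0'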
def polyline2pathd(polyline_d, is_polygon=False):
"""converts the string from a polyline points-attribute to a string for a
Path object d-attribute"""
points = COORD_PAIR_TMPLT.findall(polyline_d)
closed = (float(points[0][0]) == float(points[-1][0]) and
float(points[0][1]) == float(points[-1][1]))
# The `parse_path` call ignores redundant 'z' (closure) commands
# e.g. `parse_path('M0 0L100 100Z') == parse_path('M0 0L100 100L0 0Z')`
# This check ensures that an n-point polygon is converted to an n-Line path.
if is_polygon and closed:
points.append(points[0])
d = 'M' + 'L'.join('{0} {1}'.format(x,y) for x,y in points)
if is_polygon or closed:
d += 'z'
return d
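# Editor's example (added for clarity): an open polyline becomes a move-to
# followed by line-to commands; a closed one also gets a trailing 'z'.
#   >>> polyline2pathd('0,0 10,0 10,10')
#   'M0 0L10 0L10 10'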
def polygon2pathd(polyline_d):
"""converts the string from a polygon points-attribute to a string
for a Path object d-attribute.
Note: For a polygon made from n points, the resulting path will be
composed of n lines (even if some of these lines have length zero).
"""
return polyline2pathd(polyline_d, True)
def rect2pathd(rect):
"""Converts an SVG-rect element to a Path d-string.
The rectangle will start at the (x,y) coordinate specified by the
rectangle object and proceed counter-clockwise."""
x0, y0 = float(rect.get('x', 0)), float(rect.get('y', 0))
w, h = float(rect.get('width', 0)), float(rect.get('height', 0))
x1, y1 = x0 + w, y0
x2, y2 = x0 + w, y0 + h
x3, y3 = x0, y0 + h
d = ("M{} {} L {} {} L {} {} L {} {} z"
"".format(x0, y0, x1, y1, x2, y2, x3, y3))
return d
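# Editor's example (added for clarity): the four corners are emitted in order,
# starting from the rectangle's (x, y) corner.
#   >>> rect2pathd({'x': '1', 'y': '2', 'width': '3', 'height': '4'})
#   'M1.0 2.0 L 4.0 2.0 L 4.0 6.0 L 1.0 6.0 z'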
def line2pathd(l):
return 'M' + l['x1'] + ' ' + l['y1'] + 'L' + l['x2'] + ' ' + l['y2']
def string2paths(svg_string,
return_svg_attributes=True,
convert_circles_to_paths=True,
convert_ellipses_to_paths=True,
convert_lines_to_paths=True,
convert_polylines_to_paths=True,
convert_polygons_to_paths=True,
convert_rectangles_to_paths=True):
doc = parseString(svg_string)
def dom2dict(element):
"""Converts DOM elements to dictionaries of attributes."""
keys = list(element.attributes.keys())
values = [val.value for val in list(element.attributes.values())]
return dict(list(zip(keys, values)))
# Use minidom to extract path strings from input SVG
paths = [dom2dict(el) for el in doc.getElementsByTagName('path')]
d_strings = [el['d'] for el in paths]
attribute_dictionary_list = paths
# Use minidom to extract polyline strings from input SVG, convert to
# path strings, add to list
if convert_polylines_to_paths:
plins = [dom2dict(el) for el in doc.getElementsByTagName('polyline')]
d_strings += [polyline2pathd(pl['points']) for pl in plins]
attribute_dictionary_list += plins
# Use minidom to extract polygon strings from input SVG, convert to
# path strings, add to list
if convert_polygons_to_paths:
pgons = [dom2dict(el) for el in doc.getElementsByTagName('polygon')]
d_strings += [polygon2pathd(pg['points']) for pg in pgons]
attribute_dictionary_list += pgons
if convert_lines_to_paths:
lines = [dom2dict(el) for el in doc.getElementsByTagName('line')]
d_strings += [('M' + l['x1'] + ' ' + l['y1'] +
'L' + l['x2'] + ' ' + l['y2']) for l in lines]
attribute_dictionary_list += lines
if convert_ellipses_to_paths:
ellipses = [dom2dict(el) for el in doc.getElementsByTagName('ellipse')]
d_strings += [ellipse2pathd(e) for e in ellipses]
attribute_dictionary_list += ellipses
if convert_circles_to_paths:
circles = [dom2dict(el) for el in doc.getElementsByTagName('circle')]
d_strings += [ellipse2pathd(c) for c in circles]
attribute_dictionary_list += circles
if convert_rectangles_to_paths:
rectangles = [dom2dict(el) for el in doc.getElementsByTagName('rect')]
d_strings += [rect2pathd(r) for r in rectangles]
attribute_dictionary_list += rectangles
if return_svg_attributes:
svg_attributes = dom2dict(doc.getElementsByTagName('svg')[0])
doc.unlink()
path_list = [parse_path(d) for d in d_strings]
return path_list, attribute_dictionary_list, svg_attributes
else:
doc.unlink()
path_list = [parse_path(d) for d in d_strings]
return path_list, attribute_dictionary_list
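# Editor's sketch of intended usage (not part of the upstream module): the
# point of string2paths is to accept SVG markup directly instead of a filename.
#   >>> svg = '<svg><line x1="0" y1="0" x2="10" y2="0"/></svg>'
#   >>> paths, attrs, svg_attrs = string2paths(svg)
#   >>> paths   # a single Path holding one Line from 0 to 10+0j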
def _tokenize_path(pathdef):
for x in COMMAND_RE.split(pathdef):
if x in COMMANDS:
yield x
for token in FLOAT_RE.findall(x):
yield token
def parse_path(pathdef, current_pos=0j, tree_element=None):
# In the SVG specs, initial movetos are absolute, even if
# specified as 'm'. This is the default behavior here as well.
# But if you pass in a current_pos variable, the initial moveto
# will be relative to that current_pos. This is useful.
elements = list(_tokenize_path(pathdef))
# Reverse for easy use of .pop()
elements.reverse()
if tree_element is None:
segments = Path()
else:
segments = Path(tree_element=tree_element)
start_pos = None
command = None
while elements:
if elements[-1] in COMMANDS:
# New command.
last_command = command # Used by S and T
command = elements.pop()
absolute = command in UPPERCASE
command = command.upper()
else:
# If this element starts with numbers, it is an implicit command
# and we don't change the command. Check that it's allowed:
if command is None:
raise ValueError("Unallowed implicit command in %s, position %s" % (
pathdef, len(pathdef.split()) - len(elements)))
if command == 'M':
# Moveto command.
x = elements.pop()
y = elements.pop()
pos = float(x) + float(y) * 1j
if absolute:
current_pos = pos
else:
current_pos += pos
# when M is called, reset start_pos
# This behavior of Z is defined in svg spec:
# http://www.w3.org/TR/SVG/paths.html#PathDataClosePathCommand
start_pos = current_pos
# Implicit moveto commands are treated as lineto commands.
# So we set command to lineto here, in case there are
# further implicit commands after this moveto.
command = 'L'
elif command == 'Z':
# Close path
if not (current_pos == start_pos):
segments.append(Line(current_pos, start_pos))
segments.closed = True
current_pos = start_pos
command = None
elif command == 'L':
x = elements.pop()
y = elements.pop()
pos = float(x) + float(y) * 1j
if not absolute:
pos += current_pos
segments.append(Line(current_pos, pos))
current_pos = pos
elif command == 'H':
x = elements.pop()
pos = float(x) + current_pos.imag * 1j
if not absolute:
pos += current_pos.real
segments.append(Line(current_pos, pos))
current_pos = pos
elif command == 'V':
y = elements.pop()
pos = current_pos.real + float(y) * 1j
if not absolute:
pos += current_pos.imag * 1j
segments.append(Line(current_pos, pos))
current_pos = pos
elif command == 'C':
control1 = float(elements.pop()) + float(elements.pop()) * 1j
control2 = float(elements.pop()) + float(elements.pop()) * 1j
end = float(elements.pop()) + float(elements.pop()) * 1j
if not absolute:
control1 += current_pos
control2 += current_pos
end += current_pos
segments.append(CubicBezier(current_pos, control1, control2, end))
current_pos = end
elif command == 'S':
# Smooth curve. First control point is the "reflection" of
# the second control point in the previous path.
if last_command not in 'CS':
# If there is no previous command or if the previous command
# was not an C, c, S or s, assume the first control point is
# coincident with the current point.
control1 = current_pos
else:
# The first control point is assumed to be the reflection of
# the second control point on the previous command relative
# to the current point.
control1 = current_pos + current_pos - segments[-1].control2
control2 = float(elements.pop()) + float(elements.pop()) * 1j
end = float(elements.pop()) + float(elements.pop()) * 1j
if not absolute:
control2 += current_pos
end += current_pos
segments.append(CubicBezier(current_pos, control1, control2, end))
current_pos = end
elif command == 'Q':
control = float(elements.pop()) + float(elements.pop()) * 1j
end = float(elements.pop()) + float(elements.pop()) * 1j
if not absolute:
control += current_pos
end += current_pos
segments.append(QuadraticBezier(current_pos, control, end))
current_pos = end
elif command == 'T':
# Smooth curve. Control point is the "reflection" of
# the second control point in the previous path.
if last_command not in 'QT':
# If there is no previous command or if the previous command
# was not an Q, q, T or t, assume the first control point is
# coincident with the current point.
control = current_pos
else:
# The control point is assumed to be the reflection of
# the control point on the previous command relative
# to the current point.
control = current_pos + current_pos - segments[-1].control
end = float(elements.pop()) + float(elements.pop()) * 1j
if not absolute:
end += current_pos
segments.append(QuadraticBezier(current_pos, control, end))
current_pos = end
elif command == 'A':
radius = float(elements.pop()) + float(elements.pop()) * 1j
rotation = float(elements.pop())
arc = float(elements.pop())
sweep = float(elements.pop())
end = float(elements.pop()) + float(elements.pop()) * 1j
if not absolute:
end += current_pos
segments.append(Arc(current_pos, radius, rotation, arc, sweep, end))
current_pos = end
return segments
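# Editor's example (added for clarity): the parser walks the token stream and
# emits one segment object per drawing command, so
#   >>> parse_path('M 0 0 L 100 100')
# yields a Path containing Line(start=0j, end=(100+100j)).  Lowercase commands
# are taken relative to current_pos, and 'Z' closes back to the last move-to.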
def _check_num_parsed_values(values, allowed):
if not any(num == len(values) for num in allowed):
if len(allowed) > 1:
warnings.warn('Expected one of the following number of values {0}, but found {1} values instead: {2}'
.format(allowed, len(values), values))
elif allowed[0] != 1:
warnings.warn('Expected {0} values, found {1}: {2}'.format(allowed[0], len(values), values))
else:
warnings.warn('Expected 1 value, found {0}: {1}'.format(len(values), values))
return False
return True
def parse_transform(transform_str):
warnings.warn('Transforms not implemented')
| 36.53022 | 113 | 0.582011 |
from svgpathtools import Line, QuadraticBezier, CubicBezier, Path, Arc
from xml.dom.minidom import parseString
import warnings
import re
try:
str = basestring
except NameError:
pass
COMMANDS = set('MmZzLlHhVvCcSsQqTtAa')
UPPERCASE = set('MZLHVCSQTA')
COMMAND_RE = re.compile("([MmZzLlHhVvCcSsQqTtAa])")
FLOAT_RE = re.compile(r"[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?")
COORD_PAIR_TMPLT = re.compile(
r'([\+-]?\d*[\.\d]\d*[eE][\+-]?\d+|[\+-]?\d*[\.\d]\d*)' +
r'(?:\s*,\s*|\s+|(?=-))' +
r'([\+-]?\d*[\.\d]\d*[eE][\+-]?\d+|[\+-]?\d*[\.\d]\d*)'
)
def path2pathd(path):
return path.get('d', '')
def ellipse2pathd(ellipse):
cx = ellipse.get('cx', 0)
cy = ellipse.get('cy', 0)
rx = ellipse.get('rx', None)
ry = ellipse.get('ry', None)
r = ellipse.get('r', None)
if r is not None:
rx = ry = float(r)
else:
rx = float(rx)
ry = float(ry)
cx = float(cx)
cy = float(cy)
d = ''
d += 'M' + str(cx - rx) + ',' + str(cy)
d += 'a' + str(rx) + ',' + str(ry) + ' 0 1,0 ' + str(2 * rx) + ',0'
d += 'a' + str(rx) + ',' + str(ry) + ' 0 1,0 ' + str(-2 * rx) + ',0'
return d
def polyline2pathd(polyline_d, is_polygon=False):
points = COORD_PAIR_TMPLT.findall(polyline_d)
closed = (float(points[0][0]) == float(points[-1][0]) and
float(points[0][1]) == float(points[-1][1]))
if is_polygon and closed:
points.append(points[0])
d = 'M' + 'L'.join('{0} {1}'.format(x,y) for x,y in points)
if is_polygon or closed:
d += 'z'
return d
def polygon2pathd(polyline_d):
return polyline2pathd(polyline_d, True)
def rect2pathd(rect):
x0, y0 = float(rect.get('x', 0)), float(rect.get('y', 0))
w, h = float(rect.get('width', 0)), float(rect.get('height', 0))
x1, y1 = x0 + w, y0
x2, y2 = x0 + w, y0 + h
x3, y3 = x0, y0 + h
d = ("M{} {} L {} {} L {} {} L {} {} z"
"".format(x0, y0, x1, y1, x2, y2, x3, y3))
return d
def line2pathd(l):
return 'M' + l['x1'] + ' ' + l['y1'] + 'L' + l['x2'] + ' ' + l['y2']
def string2paths(svg_string,
return_svg_attributes=True,
convert_circles_to_paths=True,
convert_ellipses_to_paths=True,
convert_lines_to_paths=True,
convert_polylines_to_paths=True,
convert_polygons_to_paths=True,
convert_rectangles_to_paths=True):
doc = parseString(svg_string)
def dom2dict(element):
keys = list(element.attributes.keys())
values = [val.value for val in list(element.attributes.values())]
return dict(list(zip(keys, values)))
paths = [dom2dict(el) for el in doc.getElementsByTagName('path')]
d_strings = [el['d'] for el in paths]
attribute_dictionary_list = paths
if convert_polylines_to_paths:
plins = [dom2dict(el) for el in doc.getElementsByTagName('polyline')]
d_strings += [polyline2pathd(pl['points']) for pl in plins]
attribute_dictionary_list += plins
if convert_polygons_to_paths:
pgons = [dom2dict(el) for el in doc.getElementsByTagName('polygon')]
d_strings += [polygon2pathd(pg['points']) for pg in pgons]
attribute_dictionary_list += pgons
if convert_lines_to_paths:
lines = [dom2dict(el) for el in doc.getElementsByTagName('line')]
d_strings += [('M' + l['x1'] + ' ' + l['y1'] +
'L' + l['x2'] + ' ' + l['y2']) for l in lines]
attribute_dictionary_list += lines
if convert_ellipses_to_paths:
ellipses = [dom2dict(el) for el in doc.getElementsByTagName('ellipse')]
d_strings += [ellipse2pathd(e) for e in ellipses]
attribute_dictionary_list += ellipses
if convert_circles_to_paths:
circles = [dom2dict(el) for el in doc.getElementsByTagName('circle')]
d_strings += [ellipse2pathd(c) for c in circles]
attribute_dictionary_list += circles
if convert_rectangles_to_paths:
rectangles = [dom2dict(el) for el in doc.getElementsByTagName('rect')]
d_strings += [rect2pathd(r) for r in rectangles]
attribute_dictionary_list += rectangles
if return_svg_attributes:
svg_attributes = dom2dict(doc.getElementsByTagName('svg')[0])
doc.unlink()
path_list = [parse_path(d) for d in d_strings]
return path_list, attribute_dictionary_list, svg_attributes
else:
doc.unlink()
path_list = [parse_path(d) for d in d_strings]
return path_list, attribute_dictionary_list
def _tokenize_path(pathdef):
for x in COMMAND_RE.split(pathdef):
if x in COMMANDS:
yield x
for token in FLOAT_RE.findall(x):
yield token
def parse_path(pathdef, current_pos=0j, tree_element=None):
elements = list(_tokenize_path(pathdef))
elements.reverse()
if tree_element is None:
segments = Path()
else:
segments = Path(tree_element=tree_element)
start_pos = None
command = None
while elements:
if elements[-1] in COMMANDS:
last_command = command
command = elements.pop()
absolute = command in UPPERCASE
command = command.upper()
else:
if command is None:
raise ValueError("Unallowed implicit command in %s, position %s" % (
pathdef, len(pathdef.split()) - len(elements)))
if command == 'M':
x = elements.pop()
y = elements.pop()
pos = float(x) + float(y) * 1j
if absolute:
current_pos = pos
else:
current_pos += pos
current_pos
command = 'L'
elif command == 'Z':
if not (current_pos == start_pos):
segments.append(Line(current_pos, start_pos))
segments.closed = True
current_pos = start_pos
command = None
elif command == 'L':
x = elements.pop()
y = elements.pop()
pos = float(x) + float(y) * 1j
if not absolute:
pos += current_pos
segments.append(Line(current_pos, pos))
current_pos = pos
elif command == 'H':
x = elements.pop()
pos = float(x) + current_pos.imag * 1j
if not absolute:
pos += current_pos.real
segments.append(Line(current_pos, pos))
current_pos = pos
elif command == 'V':
y = elements.pop()
pos = current_pos.real + float(y) * 1j
if not absolute:
pos += current_pos.imag * 1j
segments.append(Line(current_pos, pos))
current_pos = pos
elif command == 'C':
control1 = float(elements.pop()) + float(elements.pop()) * 1j
control2 = float(elements.pop()) + float(elements.pop()) * 1j
end = float(elements.pop()) + float(elements.pop()) * 1j
if not absolute:
control1 += current_pos
control2 += current_pos
end += current_pos
segments.append(CubicBezier(current_pos, control1, control2, end))
current_pos = end
elif command == 'S':
if last_command not in 'CS':
control1 = current_pos
else:
control1 = current_pos + current_pos - segments[-1].control2
control2 = float(elements.pop()) + float(elements.pop()) * 1j
end = float(elements.pop()) + float(elements.pop()) * 1j
if not absolute:
control2 += current_pos
end += current_pos
segments.append(CubicBezier(current_pos, control1, control2, end))
current_pos = end
elif command == 'Q':
control = float(elements.pop()) + float(elements.pop()) * 1j
end = float(elements.pop()) + float(elements.pop()) * 1j
if not absolute:
control += current_pos
end += current_pos
segments.append(QuadraticBezier(current_pos, control, end))
current_pos = end
elif command == 'T':
if last_command not in 'QT':
control = current_pos
else:
control = current_pos + current_pos - segments[-1].control
end = float(elements.pop()) + float(elements.pop()) * 1j
if not absolute:
end += current_pos
segments.append(QuadraticBezier(current_pos, control, end))
current_pos = end
elif command == 'A':
radius = float(elements.pop()) + float(elements.pop()) * 1j
rotation = float(elements.pop())
arc = float(elements.pop())
sweep = float(elements.pop())
end = float(elements.pop()) + float(elements.pop()) * 1j
if not absolute:
end += current_pos
segments.append(Arc(current_pos, radius, rotation, arc, sweep, end))
current_pos = end
return segments
def _check_num_parsed_values(values, allowed):
if not any(num == len(values) for num in allowed):
if len(allowed) > 1:
warnings.warn('Expected one of the following number of values {0}, but found {1} values instead: {2}'
.format(allowed, len(values), values))
elif allowed[0] != 1:
warnings.warn('Expected {0} values, found {1}: {2}'.format(allowed[0], len(values), values))
else:
warnings.warn('Expected 1 value, found {0}: {1}'.format(len(values), values))
return False
return True
def parse_transform(transform_str):
warnings.warn('Transforms not implemented')
| true | true |
f71f58007a0c5588589b9d561d48fa13ca605a79 | 4,663 | py | Python | parser/fase2/team22/Instrucciones/Sql_alter/AlterTableAddColumn.py | LopDlMa/tytus | 0b43ee1c7300cb11ddbe593e08239321b71dc443 | [
"MIT"
] | null | null | null | parser/fase2/team22/Instrucciones/Sql_alter/AlterTableAddColumn.py | LopDlMa/tytus | 0b43ee1c7300cb11ddbe593e08239321b71dc443 | [
"MIT"
] | null | null | null | parser/fase2/team22/Instrucciones/Sql_alter/AlterTableAddColumn.py | LopDlMa/tytus | 0b43ee1c7300cb11ddbe593e08239321b71dc443 | [
"MIT"
] | null | null | null | from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.Excepcion import Excepcion
import collections
from storageManager.jsonMode import *
from Optimizador.C3D import *
from Instrucciones.TablaSimbolos import Instruccion3D as c3d
class AlterTableAddColumn(Instruccion):
def __init__(self, tabla, lista_col, strGram,linea, columna):
Instruccion.__init__(self,None,linea,columna,strGram)
self.tabla = tabla
self.lista_col = lista_col
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
if arbol.bdUsar != None:
objetoTabla = arbol.devolviendoTablaDeBase(self.tabla)
if objetoTabla != 0:
existeColumna = False
for c in self.lista_col:
for columnas in objetoTabla.lista_de_campos:
                        # If the column already exists, report a semantic error
if columnas.nombre == c.id:
existeColumna = True
error = Excepcion('42701',"Semántico","Ya existe la columna «"+c.id+"» en la relación «"+self.tabla+"»",c.linea,c.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
if existeColumna:
return
                # Check for duplicate names among the columns to be added
nombres = []
for columnas in self.lista_col:
nombres.append(columnas.id)
duplicados = [item for item, count in collections.Counter(nombres).items() if count > 1]
for columnas in duplicados:
existeColumna = True
error = Excepcion('42701',"Semántico","Ya existe la columna «"+columnas+"» en la relación «"+self.tabla+"»",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
if existeColumna:
return
                # The columns are stored in memory.
for c in self.lista_col:
objetoTabla.agregarColumna(c.id, c.tipo,None, None)
                # The columns are stored on disk.
for columnas in self.lista_col:
resultado = alterAddColumn(arbol.getBaseDatos(),self.tabla,columnas.id)
if resultado == 1:
error = Excepcion('XX000',"Semántico","Error interno",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
elif resultado == 2:
error = Excepcion('42P00',"Semántico","La base de datos "+str(arbol.getBaseDatos())+" no existe",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
elif resultado == 3:
error = Excepcion('42P01',"Semántico","No existe la relación "+self.tabla,self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
arbol.consola.append("Consulta devuelta correctamente.")
else:
error = Excepcion('42P01',"Semántico","No existe la relación "+self.tabla,self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
else:
error = Excepcion("100","Semantico","No ha seleccionado ninguna Base de Datos.",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
def generar3D(self, tabla, arbol):
super().generar3D(tabla,arbol)
code = []
t0 = c3d.getTemporal()
code.append(c3d.asignacionString(t0, "ALTER TABLE " + self.tabla))
t1 = c3d.getTemporal()
for col in self.lista_col:
code.append(c3d.operacion(t1, Identificador(t0), Valor(" \" ADD COLUMN " + col.id + " " + col.tipo.toString() + "\" ", "STRING"), OP_ARITMETICO.SUMA))
t0 = t1
t1 = c3d.getTemporal()
code.append(c3d.operacion(t1, Identificador(t0), Valor("\";\"", "STRING"), OP_ARITMETICO.SUMA))
code.append(c3d.asignacionTemporalStack(t1))
code.append(c3d.aumentarP())
return code | 51.811111 | 162 | 0.5638 | from Instrucciones.TablaSimbolos.Instruccion import Instruccion
from Instrucciones.Excepcion import Excepcion
import collections
from storageManager.jsonMode import *
from Optimizador.C3D import *
from Instrucciones.TablaSimbolos import Instruccion3D as c3d
class AlterTableAddColumn(Instruccion):
def __init__(self, tabla, lista_col, strGram,linea, columna):
Instruccion.__init__(self,None,linea,columna,strGram)
self.tabla = tabla
self.lista_col = lista_col
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
if arbol.bdUsar != None:
objetoTabla = arbol.devolviendoTablaDeBase(self.tabla)
if objetoTabla != 0:
existeColumna = False
for c in self.lista_col:
for columnas in objetoTabla.lista_de_campos:
if columnas.nombre == c.id:
existeColumna = True
error = Excepcion('42701',"Semántico","Ya existe la columna «"+c.id+"» en la relación «"+self.tabla+"»",c.linea,c.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
if existeColumna:
return
nombres = []
for columnas in self.lista_col:
nombres.append(columnas.id)
duplicados = [item for item, count in collections.Counter(nombres).items() if count > 1]
for columnas in duplicados:
existeColumna = True
error = Excepcion('42701',"Semántico","Ya existe la columna «"+columnas+"» en la relación «"+self.tabla+"»",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
if existeColumna:
return
for c in self.lista_col:
objetoTabla.agregarColumna(c.id, c.tipo,None, None)
for columnas in self.lista_col:
resultado = alterAddColumn(arbol.getBaseDatos(),self.tabla,columnas.id)
if resultado == 1:
error = Excepcion('XX000',"Semántico","Error interno",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
elif resultado == 2:
error = Excepcion('42P00',"Semántico","La base de datos "+str(arbol.getBaseDatos())+" no existe",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
elif resultado == 3:
error = Excepcion('42P01',"Semántico","No existe la relación "+self.tabla,self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
arbol.consola.append("Consulta devuelta correctamente.")
else:
error = Excepcion('42P01',"Semántico","No existe la relación "+self.tabla,self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
return error
else:
error = Excepcion("100","Semantico","No ha seleccionado ninguna Base de Datos.",self.linea,self.columna)
arbol.excepciones.append(error)
arbol.consola.append(error.toString())
def generar3D(self, tabla, arbol):
super().generar3D(tabla,arbol)
code = []
t0 = c3d.getTemporal()
code.append(c3d.asignacionString(t0, "ALTER TABLE " + self.tabla))
t1 = c3d.getTemporal()
for col in self.lista_col:
code.append(c3d.operacion(t1, Identificador(t0), Valor(" \" ADD COLUMN " + col.id + " " + col.tipo.toString() + "\" ", "STRING"), OP_ARITMETICO.SUMA))
t0 = t1
t1 = c3d.getTemporal()
code.append(c3d.operacion(t1, Identificador(t0), Valor("\";\"", "STRING"), OP_ARITMETICO.SUMA))
code.append(c3d.asignacionTemporalStack(t1))
code.append(c3d.aumentarP())
return code | true | true |
f71f5818bd5e30abb2dd28facc28beb49f2ea0f1 | 1,726 | py | Python | my_methods/my_cap_curve.py | noushadkhan01/my_methods | fc467d5c34b9b5dd105e32cc5aad218d3f6408a8 | [
"MIT"
] | null | null | null | my_methods/my_cap_curve.py | noushadkhan01/my_methods | fc467d5c34b9b5dd105e32cc5aad218d3f6408a8 | [
"MIT"
] | null | null | null | my_methods/my_cap_curve.py | noushadkhan01/my_methods | fc467d5c34b9b5dd105e32cc5aad218d3f6408a8 | [
"MIT"
] | null | null | null | def my_cap_curve(model, X, y, figsize = (10, 5),legend_font_size = 10,loc = 'best',
linewidth = 2,label_font_size = 10, poly_features = False, extra_name = None):
import matplotlib.pyplot as plt
import numpy as np
import my_global_variables
from sklearn.metrics import roc_curve, auc
class_name = model.__class__.__name__
if poly_features:
class_name = class_name + '_poly'
if extra_name:
class_name += '_' + extra_name
total = len(y)
class_1_count = np.sum(y)
class_0_count = total - class_1_count
probs = model.predict_proba(X)
probs = probs[:, 1]
model_y = [y for _, y in sorted(zip(probs, y), reverse = True)]
y_values = np.append([0], np.cumsum(model_y))
x_values = np.arange(0, total + 1)
# Area under Random Model
a = auc([0, total], [0, class_1_count])
# Area between Perfect and Random Model
aP = auc([0, class_1_count, total], [0, class_1_count, class_1_count]) - a
# Area between Trained and Random Model
aR = auc(x_values, y_values) - a
plt.figure(figsize = (figsize))
plt.plot([0, total], [0, class_1_count], c = 'r', linestyle = '--', label = 'Random Model')
plt.plot([0, class_1_count, total], [0, class_1_count, class_1_count], c = 'grey', linewidth = linewidth, label = 'Perfect Model')
plt.plot(x_values, y_values, c = 'b', label = f'{class_name} Classifier Accuracy Rate = {aR/aP}', linewidth = linewidth)
plt.xlabel('Total observations', fontsize = label_font_size)
plt.ylabel('Class 1 observations', fontsize = label_font_size)
plt.title('Cumulative Accuracy Profile', fontsize = label_font_size)
plt.legend(loc = loc, fontsize = legend_font_size)
plt.show()
my_global_variables.model_cap_scores[class_name] = aR/aP
| 45.421053 | 132 | 0.695829 | def my_cap_curve(model, X, y, figsize = (10, 5),legend_font_size = 10,loc = 'best',
linewidth = 2,label_font_size = 10, poly_features = False, extra_name = None):
import matplotlib.pyplot as plt
import numpy as np
import my_global_variables
from sklearn.metrics import roc_curve, auc
class_name = model.__class__.__name__
if poly_features:
class_name = class_name + '_poly'
if extra_name:
class_name += '_' + extra_name
total = len(y)
class_1_count = np.sum(y)
class_0_count = total - class_1_count
probs = model.predict_proba(X)
probs = probs[:, 1]
model_y = [y for _, y in sorted(zip(probs, y), reverse = True)]
y_values = np.append([0], np.cumsum(model_y))
x_values = np.arange(0, total + 1)
a = auc([0, total], [0, class_1_count])
aP = auc([0, class_1_count, total], [0, class_1_count, class_1_count]) - a
aR = auc(x_values, y_values) - a
plt.figure(figsize = (figsize))
plt.plot([0, total], [0, class_1_count], c = 'r', linestyle = '--', label = 'Random Model')
plt.plot([0, class_1_count, total], [0, class_1_count, class_1_count], c = 'grey', linewidth = linewidth, label = 'Perfect Model')
plt.plot(x_values, y_values, c = 'b', label = f'{class_name} Classifier Accuracy Rate = {aR/aP}', linewidth = linewidth)
plt.xlabel('Total observations', fontsize = label_font_size)
plt.ylabel('Class 1 observations', fontsize = label_font_size)
plt.title('Cumulative Accuracy Profile', fontsize = label_font_size)
plt.legend(loc = loc, fontsize = legend_font_size)
plt.show()
my_global_variables.model_cap_scores[class_name] = aR/aP
| true | true |
f71f5879feebeaca94821aab1a4522d364bde04b | 2,130 | py | Python | tests/test_setutils.py | acatton/fork--mahmoud--boltons | 8916c66121cdbbe2bfc365152d5c202096a0ad16 | [
"BSD-3-Clause"
] | 1 | 2017-05-08T17:42:01.000Z | 2017-05-08T17:42:01.000Z | tests/test_setutils.py | acatton/fork--mahmoud--boltons | 8916c66121cdbbe2bfc365152d5c202096a0ad16 | [
"BSD-3-Clause"
] | 16 | 2018-10-15T10:07:36.000Z | 2019-01-07T04:34:34.000Z | tests/test_setutils.py | r0flc0pt4/boltons | 96bd42b5cca2a8783079430b94f9930b764573e9 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from boltons.setutils import IndexedSet, _MISSING
def test_indexed_set_basic():
zero2nine = IndexedSet(range(10))
five2nine = zero2nine & IndexedSet(range(5, 15))
x = IndexedSet(five2nine)
x |= set([10])
assert list(zero2nine) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
assert set(zero2nine) == set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
assert list(five2nine) == [5, 6, 7, 8, 9]
assert x == IndexedSet([5, 6, 7, 8, 9, 10])
assert x[-1] == 10
assert zero2nine ^ five2nine == IndexedSet([0, 1, 2, 3, 4])
assert x[:3] == IndexedSet([5, 6, 7])
assert x[2:4:-1] == IndexedSet([8, 7])
def test_indexed_set_mutate():
thou = IndexedSet(range(1000))
assert (thou.pop(), thou.pop()) == (999, 998)
assert (thou.pop(499), thou.pop(499)) == (499, 500)
ref = [495, 496, 497, 498, 501, 502, 503, 504, 505, 506]
assert [thou[i] for i in range(495, 505)] == ref
assert len(thou) == 996
while len(thou) > 600:
dead_idx_len = len(thou.dead_indices)
dead_idx_count = thou._dead_index_count
thou.pop(0)
new_dead_idx_len = len(thou.dead_indices)
if new_dead_idx_len < dead_idx_len:
assert dead_idx_count > 0
# 124, 109, 95
assert len(thou) == 600
assert thou._dead_index_count == 67
assert not any([thou[i] is _MISSING for i in range(len(thou))])
thou &= IndexedSet(range(500, 503))
assert thou == IndexedSet([501, 502])
return
def big_popper():
# more of a benchmark than a test
from os import urandom
import time
big_set = IndexedSet(range(100000))
rands = [ord(r) for r in urandom(len(big_set))]
start_time, start_size = time.time(), len(big_set)
while len(big_set) > 10000:
if len(big_set) % 10000 == 0:
print(len(big_set) / 10000)
rand = rands.pop()
big_set.pop(rand)
big_set.pop(-rand)
end_time, end_size = time.time(), len(big_set)
print()
print('popped %s items in %s seconds' % (start_size - end_size,
end_time - start_time))
| 30.869565 | 68 | 0.585915 |
from boltons.setutils import IndexedSet, _MISSING
def test_indexed_set_basic():
zero2nine = IndexedSet(range(10))
five2nine = zero2nine & IndexedSet(range(5, 15))
x = IndexedSet(five2nine)
x |= set([10])
assert list(zero2nine) == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
assert set(zero2nine) == set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
assert list(five2nine) == [5, 6, 7, 8, 9]
assert x == IndexedSet([5, 6, 7, 8, 9, 10])
assert x[-1] == 10
assert zero2nine ^ five2nine == IndexedSet([0, 1, 2, 3, 4])
assert x[:3] == IndexedSet([5, 6, 7])
assert x[2:4:-1] == IndexedSet([8, 7])
def test_indexed_set_mutate():
thou = IndexedSet(range(1000))
assert (thou.pop(), thou.pop()) == (999, 998)
assert (thou.pop(499), thou.pop(499)) == (499, 500)
ref = [495, 496, 497, 498, 501, 502, 503, 504, 505, 506]
assert [thou[i] for i in range(495, 505)] == ref
assert len(thou) == 996
while len(thou) > 600:
dead_idx_len = len(thou.dead_indices)
dead_idx_count = thou._dead_index_count
thou.pop(0)
new_dead_idx_len = len(thou.dead_indices)
if new_dead_idx_len < dead_idx_len:
assert dead_idx_count > 0
assert len(thou) == 600
assert thou._dead_index_count == 67
assert not any([thou[i] is _MISSING for i in range(len(thou))])
thou &= IndexedSet(range(500, 503))
assert thou == IndexedSet([501, 502])
return
def big_popper():
from os import urandom
import time
big_set = IndexedSet(range(100000))
rands = [ord(r) for r in urandom(len(big_set))]
start_time, start_size = time.time(), len(big_set)
while len(big_set) > 10000:
if len(big_set) % 10000 == 0:
print(len(big_set) / 10000)
rand = rands.pop()
big_set.pop(rand)
big_set.pop(-rand)
end_time, end_size = time.time(), len(big_set)
print()
print('popped %s items in %s seconds' % (start_size - end_size,
end_time - start_time))
| true | true |
f71f588ae8e89518a40ae039426b0803c80db5e6 | 27,740 | py | Python | dask/array/top.py | migueltorrescosta/dask | 60f488cf7358d14c523f84de9afbb10022818367 | [
"BSD-3-Clause"
] | 1 | 2019-05-24T00:46:48.000Z | 2019-05-24T00:46:48.000Z | dask/array/top.py | migueltorrescosta/dask | 60f488cf7358d14c523f84de9afbb10022818367 | [
"BSD-3-Clause"
] | null | null | null | dask/array/top.py | migueltorrescosta/dask | 60f488cf7358d14c523f84de9afbb10022818367 | [
"BSD-3-Clause"
] | null | null | null | import itertools
import numbers
import numpy as np
import toolz
from .. import base, core, sharedict, utils
from ..compatibility import apply, Mapping
from ..delayed import to_task_dask
from ..optimization import SubgraphCallable
def subs(task, substitution):
""" Create a new task with the values substituted
This is like dask.core.subs, but takes a dict of many substitutions to
perform simultaneously. It is not as concerned with micro performance.
"""
if isinstance(task, dict):
return {k: subs(v, substitution) for k, v in task.items()}
if type(task) in (tuple, list, set):
return type(task)([subs(x, substitution) for x in task])
try:
return substitution[task]
except (KeyError, TypeError):
return task
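# Editor's note (not in the upstream dask source): the substitution is applied
# to every element of a (possibly nested) task, so a whole tuple-task can be
# re-pointed at new keys in one call, e.g.
#   >>> subs(('add', '_0', ('inc', '_1')), {'_0': 'x', '_1': 'y'})
#   ('add', 'x', ('inc', 'y'))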
def index_subs(ind, substitution):
""" A simple subs function that works both on tuples and strings """
if ind is None:
return ind
else:
return tuple([substitution.get(c, c) for c in ind])
def atop_token(i, prefix='_'):
return prefix + '%d' % i
def _top(func, output, output_indices, *arrind_pairs, **kwargs):
""" Create a TOP symbolic mutable mapping, given the inputs to top
This is like the ``top`` function, but rather than construct a dict, it
returns a symbolic TOP object.
See Also
--------
top
TOP
"""
numblocks = kwargs.pop('numblocks')
concatenate = kwargs.pop('concatenate', None)
new_axes = kwargs.pop('new_axes', {})
graph = sharedict.ShareDict()
# Transform indices to canonical elements
# We use terms like _0, and _1 rather than provided index elements
arrind_pairs = list(arrind_pairs)
unique_indices = {i for ii in arrind_pairs[1::2]
if ii is not None
for i in ii} | set(output_indices)
sub = {k: atop_token(i, '.')
for i, k in enumerate(sorted(unique_indices))}
output_indices = index_subs(tuple(output_indices), sub)
arrind_pairs[1::2] = [tuple(a) if a is not None else a
for a in arrind_pairs[1::2]]
arrind_pairs[1::2] = [index_subs(a, sub)
for a in arrind_pairs[1::2]]
new_axes = {index_subs((k,), sub)[0]: v for k, v in new_axes.items()}
# Unpack dask values in non-array arguments
argpairs = list(toolz.partition(2, arrind_pairs))
for i, (arg, ind) in enumerate(argpairs):
if ind is None:
arg2, dsk2 = to_task_dask(arg)
if dsk2:
graph.update(dsk2)
argpairs[i] = (arg2, ind)
# separate argpairs into two separate tuples
inputs = tuple([name for name, _ in argpairs])
inputs_indices = tuple([index for _, index in argpairs])
# Unpack delayed objects in kwargs
if kwargs:
kwargs, dsk_kwargs = to_task_dask(kwargs)
# replace keys in kwargs with _0 tokens
new_keys = list(core.get_dependencies(dsk_kwargs, task=kwargs))
new_tokens = tuple(atop_token(i) for i in range(len(inputs), len(inputs) + len(new_keys)))
sub = dict(zip(new_keys, new_tokens))
inputs = inputs + tuple(new_keys)
inputs_indices = inputs_indices + (None,) * len(new_keys)
kwargs = subs(kwargs, sub)
graph.update(dsk_kwargs)
indices = [(k, v) for k, v in zip(inputs, inputs_indices)]
keys = tuple(map(atop_token, range(len(inputs))))
# Construct local graph
if not kwargs:
dsk = {output: (func,) + keys}
else:
_keys = list(keys)
if new_keys:
_keys = _keys[:-len(new_keys)]
dsk = {output: (apply, func, _keys, kwargs)}
# Construct final output
top = TOP(output, output_indices, dsk, indices,
numblocks=numblocks, concatenate=concatenate, new_axes=new_axes)
graph.update_with_key(top, output)
graph.dependencies = {output: {arg for arg, ind in argpairs if ind is not None}}
return graph
class TOP(Mapping):
""" Tensor Operation
This is a lazily constructed mapping for tensor operation graphs.
This defines a dictionary using an operation and an indexing pattern.
It is built for many operations like elementwise, transpose, tensordot, and
so on. We choose to keep these as symbolic mappings rather than raw
dictionaries because we are able to fuse them during optimization,
sometimes resulting in much lower overhead.
See Also
--------
top
atop
"""
def __init__(self, output, output_indices, dsk, indices,
numblocks, concatenate=None, new_axes=None):
self.output = output
self.output_indices = tuple(output_indices)
self.dsk = dsk
self.indices = tuple((name, tuple(ind) if ind is not None else ind)
for name, ind in indices)
self.numblocks = numblocks
self.concatenate = concatenate
self.new_axes = new_axes or {}
@property
def _dict(self):
if hasattr(self, '_cached_dict'):
return self._cached_dict
else:
keys = tuple(map(atop_token, range(len(self.indices))))
func = SubgraphCallable(self.dsk, self.output, keys)
self._cached_dict = top(
func,
self.output,
self.output_indices,
*list(toolz.concat(self.indices)),
new_axes=self.new_axes,
numblocks=self.numblocks,
concatenate=self.concatenate
)
return self._cached_dict
def __getitem__(self, key):
return self._dict[key]
def __iter__(self):
return iter(self._dict)
def __len__(self):
return int(np.prod(list(self._out_numblocks().values())))
def _out_numblocks(self):
d = {}
indices = {k: v for k, v in self.indices if v is not None}
for k, v in self.numblocks.items():
for a, b in zip(indices[k], v):
d[a] = max(d.get(a, 0), b)
return {k: v for k, v in d.items() if k in self.output_indices}
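# Editor's sketch (illustrative only): a TOP layer behaves like an ordinary
# mapping but builds its task dict lazily, along the lines of
#   >>> layer = TOP('z', 'i', {'z': (operator.add, '_0', 123)}, [('x', 'i')],
#   ...             numblocks={'x': (3,)})
#   >>> len(layer)     # 3 output blocks, known from numblocks alone
#   3
#   >>> sorted(layer)  # keys ('z', 0) ... ('z', 2); tasks are built on access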
def top(func, output, out_indices, *arrind_pairs, **kwargs):
""" Tensor operation
Applies a function, ``func``, across blocks from many different input
dasks. We arrange the pattern with which those blocks interact with sets
of matching indices. E.g.::
top(func, 'z', 'i', 'x', 'i', 'y', 'i')
yield an embarrassingly parallel communication pattern and is read as
$$ z_i = func(x_i, y_i) $$
More complex patterns may emerge, including multiple indices::
top(func, 'z', 'ij', 'x', 'ij', 'y', 'ji')
$$ z_{ij} = func(x_{ij}, y_{ji}) $$
Indices missing in the output but present in the inputs results in many
inputs being sent to one function (see examples).
Examples
--------
Simple embarrassing map operation
>>> inc = lambda x: x + 1
>>> top(inc, 'z', 'ij', 'x', 'ij', numblocks={'x': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (inc, ('x', 0, 0)),
('z', 0, 1): (inc, ('x', 0, 1)),
('z', 1, 0): (inc, ('x', 1, 0)),
('z', 1, 1): (inc, ('x', 1, 1))}
Simple operation on two datasets
>>> add = lambda x, y: x + y
>>> top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij', numblocks={'x': (2, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
('z', 1, 0): (add, ('x', 1, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}
Operation that flips one of the datasets
>>> addT = lambda x, y: x + y.T # Transpose each chunk
>>> # z_ij ~ x_ij y_ji
>>> # .. .. .. notice swap
>>> top(addT, 'z', 'ij', 'x', 'ij', 'y', 'ji', numblocks={'x': (2, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 1, 0)),
('z', 1, 0): (add, ('x', 1, 0), ('y', 0, 1)),
('z', 1, 1): (add, ('x', 1, 1), ('y', 1, 1))}
Dot product with contraction over ``j`` index. Yields list arguments
>>> top(dotmany, 'z', 'ik', 'x', 'ij', 'y', 'jk', numblocks={'x': (2, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 0, 1): (dotmany, [('x', 0, 0), ('x', 0, 1)],
[('y', 0, 1), ('y', 1, 1)]),
('z', 1, 0): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 0), ('y', 1, 0)]),
('z', 1, 1): (dotmany, [('x', 1, 0), ('x', 1, 1)],
[('y', 0, 1), ('y', 1, 1)])}
Pass ``concatenate=True`` to concatenate arrays ahead of time
>>> top(f, 'z', 'i', 'x', 'ij', 'y', 'ij', concatenate=True,
... numblocks={'x': (2, 2), 'y': (2, 2,)}) # doctest: +SKIP
{('z', 0): (f, (concatenate_axes, [('x', 0, 0), ('x', 0, 1)], (1,)),
(concatenate_axes, [('y', 0, 0), ('y', 0, 1)], (1,)))
('z', 1): (f, (concatenate_axes, [('x', 1, 0), ('x', 1, 1)], (1,)),
(concatenate_axes, [('y', 1, 0), ('y', 1, 1)], (1,)))}
Supports Broadcasting rules
>>> top(add, 'z', 'ij', 'x', 'ij', 'y', 'ij', numblocks={'x': (1, 2),
... 'y': (2, 2)}) # doctest: +SKIP
{('z', 0, 0): (add, ('x', 0, 0), ('y', 0, 0)),
('z', 0, 1): (add, ('x', 0, 1), ('y', 0, 1)),
('z', 1, 0): (add, ('x', 0, 0), ('y', 1, 0)),
('z', 1, 1): (add, ('x', 0, 1), ('y', 1, 1))}
Support keyword arguments with apply
>>> def f(a, b=0): return a + b
>>> top(f, 'z', 'i', 'x', 'i', numblocks={'x': (2,)}, b=10) # doctest: +SKIP
{('z', 0): (apply, f, [('x', 0)], {'b': 10}),
('z', 1): (apply, f, [('x', 1)], {'b': 10})}
Include literals by indexing with ``None``
>>> top(add, 'z', 'i', 'x', 'i', 100, None, numblocks={'x': (2,)}) # doctest: +SKIP
{('z', 0): (add, ('x', 0), 100),
('z', 1): (add, ('x', 1), 100)}
See Also
--------
atop
"""
from .core import broadcast_dimensions, zero_broadcast_dimensions, concatenate_axes
numblocks = kwargs.pop('numblocks')
concatenate = kwargs.pop('concatenate', None)
new_axes = kwargs.pop('new_axes', {})
argpairs = list(toolz.partition(2, arrind_pairs))
assert set(numblocks) == {name for name, ind in argpairs if ind is not None}
all_indices = {x for _, ind in argpairs if ind for x in ind}
dummy_indices = all_indices - set(out_indices)
# Dictionary mapping {i: 3, j: 4, ...} for i, j, ... the dimensions
dims = broadcast_dimensions(argpairs, numblocks)
for k in new_axes:
dims[k] = 1
# (0, 0), (0, 1), (0, 2), (1, 0), ...
keytups = list(itertools.product(*[range(dims[i]) for i in out_indices]))
# {i: 0, j: 0}, {i: 0, j: 1}, ...
keydicts = [dict(zip(out_indices, tup)) for tup in keytups]
# {j: [1, 2, 3], ...} For j a dummy index of dimension 3
dummies = dict((i, list(range(dims[i]))) for i in dummy_indices)
dsk = {}
# Create argument lists
valtups = []
for kd in keydicts:
args = []
for arg, ind in argpairs:
if ind is None:
args.append(arg)
else:
tups = lol_tuples((arg,), ind, kd, dummies)
if any(nb == 1 for nb in numblocks[arg]):
tups2 = zero_broadcast_dimensions(tups, numblocks[arg])
else:
tups2 = tups
if concatenate and isinstance(tups2, list):
axes = [n for n, i in enumerate(ind) if i in dummies]
tups2 = (concatenate_axes, tups2, axes)
args.append(tups2)
valtups.append(args)
if not kwargs: # will not be used in an apply, should be a tuple
valtups = [tuple(vt) for vt in valtups]
# Add heads to tuples
keys = [(output,) + kt for kt in keytups]
# Unpack delayed objects in kwargs
if kwargs:
task, dsk2 = to_task_dask(kwargs)
if dsk2:
dsk.update(utils.ensure_dict(dsk2))
kwargs2 = task
else:
kwargs2 = kwargs
vals = [(apply, func, vt, kwargs2) for vt in valtups]
else:
vals = [(func,) + vt for vt in valtups]
dsk.update(dict(zip(keys, vals)))
return dsk
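# Editor's worked example (a concrete version of the docstring's first case):
#   >>> top(operator.add, 'z', 'i', 'x', 'i', 'y', 'i',
#   ...     numblocks={'x': (2,), 'y': (2,)})
#   {('z', 0): (operator.add, ('x', 0), ('y', 0)),
#    ('z', 1): (operator.add, ('x', 1), ('y', 1))}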
def atop(func, out_ind, *args, **kwargs):
""" Tensor operation: Generalized inner and outer products
A broad class of blocked algorithms and patterns can be specified with a
concise multi-index notation. The ``atop`` function applies an in-memory
function across multiple blocks of multiple inputs in a variety of ways.
Many dask.array operations are special cases of atop including elementwise,
broadcasting, reductions, tensordot, and transpose.
Parameters
----------
func : callable
Function to apply to individual tuples of blocks
out_ind : iterable
Block pattern of the output, something like 'ijk' or (1, 2, 3)
*args : sequence of Array, index pairs
Sequence like (x, 'ij', y, 'jk', z, 'i')
**kwargs : dict
Extra keyword arguments to pass to function
dtype : np.dtype
Datatype of resulting array.
concatenate : bool, keyword only
If true concatenate arrays along dummy indices, else provide lists
adjust_chunks : dict
Dictionary mapping index to function to be applied to chunk sizes
new_axes : dict, keyword only
New indexes and their dimension lengths
Examples
--------
2D embarrassingly parallel operation from two arrays, x, and y.
>>> z = atop(operator.add, 'ij', x, 'ij', y, 'ij', dtype='f8') # z = x + y # doctest: +SKIP
Outer product multiplying x by y, two 1-d vectors
>>> z = atop(operator.mul, 'ij', x, 'i', y, 'j', dtype='f8') # doctest: +SKIP
z = x.T
>>> z = atop(np.transpose, 'ji', x, 'ij', dtype=x.dtype) # doctest: +SKIP
The transpose case above is illustrative because it does same transposition
both on each in-memory block by calling ``np.transpose`` and on the order
of the blocks themselves, by switching the order of the index ``ij -> ji``.
We can compose these same patterns with more variables and more complex
in-memory functions
z = X + Y.T
>>> z = atop(lambda x, y: x + y.T, 'ij', x, 'ij', y, 'ji', dtype='f8') # doctest: +SKIP
Any index, like ``i`` missing from the output index is interpreted as a
contraction (note that this differs from Einstein convention; repeated
indices do not imply contraction.) In the case of a contraction the passed
function should expect an iterable of blocks on any array that holds that
index. To receive arrays concatenated along contracted dimensions instead
pass ``concatenate=True``.
Inner product multiplying x by y, two 1-d vectors
>>> def sequence_dot(x_blocks, y_blocks):
... result = 0
... for x, y in zip(x_blocks, y_blocks):
... result += x.dot(y)
... return result
>>> z = atop(sequence_dot, '', x, 'i', y, 'i', dtype='f8') # doctest: +SKIP
Add new single-chunk dimensions with the ``new_axes=`` keyword, including
the length of the new dimension. New dimensions will always be in a single
chunk.
>>> def f(x):
... return x[:, None] * np.ones((1, 5))
>>> z = atop(f, 'az', x, 'a', new_axes={'z': 5}, dtype=x.dtype) # doctest: +SKIP
If the applied function changes the size of each chunk you can specify this
with a ``adjust_chunks={...}`` dictionary holding a function for each index
that modifies the dimension size in that index.
>>> def double(x):
... return np.concatenate([x, x])
>>> y = atop(double, 'ij', x, 'ij',
... adjust_chunks={'i': lambda n: 2 * n}, dtype=x.dtype) # doctest: +SKIP
Include literals by indexing with None
>>> y = atop(add, 'ij', x, 'ij', 1234, None, dtype=x.dtype) # doctest: +SKIP
See Also
--------
top - dict formulation of this function, contains most logic
"""
out = kwargs.pop('name', None) # May be None at this point
token = kwargs.pop('token', None)
dtype = kwargs.pop('dtype', None)
adjust_chunks = kwargs.pop('adjust_chunks', None)
new_axes = kwargs.get('new_axes', {})
# Input Validation
if len(set(out_ind)) != len(out_ind):
raise ValueError("Repeated elements not allowed in output index",
[k for k, v in toolz.frequencies(out_ind).items() if v > 1])
new = (set(out_ind)
- {a for arg in args[1::2] if arg is not None for a in arg}
- set(new_axes or ()))
if new:
raise ValueError("Unknown dimension", new)
from .core import Array, unify_chunks, normalize_arg
if dtype is None:
raise ValueError("Must specify dtype of output array")
chunkss, arrays = unify_chunks(*args)
for k, v in new_axes.items():
chunkss[k] = (v,)
arginds = list(zip(arrays, args[1::2]))
for arg, ind in arginds:
if hasattr(arg, 'ndim') and hasattr(ind, '__len__') and arg.ndim != len(ind):
raise ValueError("Index string %s does not match array dimension %d"
% (ind, arg.ndim))
numblocks = {a.name: a.numblocks for a, ind in arginds if ind is not None}
argindsstr = list(toolz.concat([(normalize_arg(a) if ind is None else a.name, ind)
for a, ind in arginds]))
# Finish up the name
if not out:
out = '%s-%s' % (token or utils.funcname(func).strip('_'),
base.tokenize(func, out_ind, argindsstr, dtype, **kwargs))
kwargs2 = {k: normalize_arg(v) for k, v in kwargs.items()}
dsk = _top(func, out, out_ind, *argindsstr, numblocks=numblocks, **kwargs2)
dsks = [a.dask for a, ind in arginds if ind is not None]
chunks = [chunkss[i] for i in out_ind]
if adjust_chunks:
for i, ind in enumerate(out_ind):
if ind in adjust_chunks:
if callable(adjust_chunks[ind]):
chunks[i] = tuple(map(adjust_chunks[ind], chunks[i]))
elif isinstance(adjust_chunks[ind], numbers.Integral):
chunks[i] = tuple(adjust_chunks[ind] for _ in chunks[i])
elif isinstance(adjust_chunks[ind], (tuple, list)):
chunks[i] = tuple(adjust_chunks[ind])
else:
raise NotImplementedError(
"adjust_chunks values must be callable, int, or tuple")
chunks = tuple(chunks)
return Array(sharedict.merge((out, dsk), *dsks,
dependencies={out: {a.name for a, ind in arginds if ind is not None}}),
out, chunks, dtype=dtype)
def lol_tuples(head, ind, values, dummies):
""" List of list of tuple keys
Parameters
----------
head : tuple
The known tuple so far
ind : Iterable
An iterable of indices not yet covered
values : dict
Known values for non-dummy indices
dummies : dict
Ranges of values for dummy indices
Examples
--------
>>> lol_tuples(('x',), 'ij', {'i': 1, 'j': 0}, {})
('x', 1, 0)
>>> lol_tuples(('x',), 'ij', {'i': 1}, {'j': range(3)})
[('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]
>>> lol_tuples(('x',), 'ij', {'i': 1}, {'j': range(3)})
[('x', 1, 0), ('x', 1, 1), ('x', 1, 2)]
>>> lol_tuples(('x',), 'ijk', {'i': 1}, {'j': [0, 1, 2], 'k': [0, 1]}) # doctest: +NORMALIZE_WHITESPACE
[[('x', 1, 0, 0), ('x', 1, 0, 1)],
[('x', 1, 1, 0), ('x', 1, 1, 1)],
[('x', 1, 2, 0), ('x', 1, 2, 1)]]
"""
if not ind:
return head
if ind[0] not in dummies:
return lol_tuples(head + (values[ind[0]],), ind[1:], values, dummies)
else:
return [lol_tuples(head + (v,), ind[1:], values, dummies)
for v in dummies[ind[0]]]
def optimize_atop(full_graph, keys=()):
""" High level optimization of stacked TOP layers
For operations that have multiple TOP operations one after the other, like
``x.T + 123`` we can fuse these into a single TOP operation. This happens
before any actual tasks are generated, and so can reduce overhead.
This finds groups of TOP operations that can be safely fused, and then
passes them to ``rewrite_atop`` for rewriting.
Parameters
----------
full_graph: ShareDict
keys: Iterable
The keys of all outputs of all collections.
Used to make sure that we don't fuse a layer needed by an output
Returns
-------
sharedict : ShareDict
See Also
--------
rewrite_atop
"""
keep = {k[0] if type(k) is tuple else k for k in keys}
layers = full_graph.dicts
dependents = core.reverse_dict(full_graph.dependencies)
roots = {k for k in full_graph.dicts
if not dependents.get(k)}
stack = list(roots)
out = {}
dependencies = {}
seen = set()
while stack:
layer = stack.pop()
if layer in seen or layer not in layers:
continue
seen.add(layer)
# Outer loop walks through possible output TOP layers
if isinstance(layers[layer], TOP):
top_layers = {layer}
deps = set(top_layers)
while deps: # we gather as many sub-layers as we can
dep = deps.pop()
if dep not in layers:
stack.append(dep)
continue
if not isinstance(layers[dep], TOP):
stack.append(dep)
continue
if (dep != layer and dep in keep):
stack.append(dep)
continue
if layers[dep].concatenate != layers[layer].concatenate:
stack.append(dep)
continue
# passed everything, proceed
top_layers.add(dep)
# traverse further to this child's children
for d in full_graph.dependencies.get(dep, ()):
# Don't allow reductions to proceed
output_indices = set(layers[dep].output_indices)
input_indices = {i for _, ind in layers[dep].indices if ind for i in ind}
if len(dependents[d]) <= 1 and output_indices.issuperset(input_indices):
deps.add(d)
else:
stack.append(d)
# Merge these TOP layers into one
new_layer = rewrite_atop([layers[l] for l in top_layers])
out[layer] = new_layer
dependencies[layer] = {k for k, v in new_layer.indices if v is not None}
else:
out[layer] = layers[layer]
dependencies[layer] = full_graph.dependencies.get(layer, set())
stack.extend(full_graph.dependencies.get(layer, ()))
return sharedict.ShareDict(out, dependencies)
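# Editor's sketch of the fusion described in the docstring above; an illustration
# under assumptions, not dask test code.  It assumes elementwise expressions such as
# ``(x + 1) * 2`` are built from one TOP layer each, which ``optimize_atop`` then
# collapses into a single layer.  ``core.flatten`` is the same helper dask's array
# optimizer uses to flatten the output keys before calling this function.
def _optimize_atop_example():
    import dask.array as da
    x = da.ones((4, 4), chunks=(2, 2))
    y = (x + 1) * 2
    keys = list(core.flatten(y.__dask_keys__()))
    fused = optimize_atop(y.dask, keys=keys)
    # Fusion never increases the number of layers in the graph.
    assert len(fused.dicts) <= len(y.dask.dicts)
    return fused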
def rewrite_atop(inputs):
""" Rewrite a stack of atop expressions into a single atop expression
Given a set of TOP layers, combine them into a single layer. The provided
layers are expected to fit well together. That job is handled by
``optimize_atop``
Parameters
----------
inputs : List[TOP]
Returns
-------
top : TOP
See Also
--------
optimize_atop
"""
inputs = {inp.output: inp for inp in inputs}
dependencies = {inp.output: {d for d, v in inp.indices
if v is not None and d in inputs}
for inp in inputs.values()}
dependents = core.reverse_dict(dependencies)
new_index_iter = (c + (str(d) if d else '') # A, B, ... A1, B1, ...
for d in itertools.count()
for c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
[root] = [k for k, v in dependents.items() if not v]
# Our final results. These will change during fusion below
indices = list(inputs[root].indices)
new_axes = inputs[root].new_axes
concatenate = inputs[root].concatenate
dsk = dict(inputs[root].dsk)
changed = True
while changed:
changed = False
for i, (dep, ind) in enumerate(indices):
if ind is None:
continue
if dep not in inputs:
continue
changed = True
# Replace _n with dep name in existing tasks
# (inc, _0) -> (inc, 'b')
dsk = {k: subs(v, {atop_token(i): dep}) for k, v in dsk.items()}
# Remove current input from input indices
# [('a', 'i'), ('b', 'i')] -> [('a', 'i')]
_, current_dep_indices = indices.pop(i)
sub = {atop_token(i): atop_token(i - 1) for i in range(i + 1, len(indices) + 1)}
dsk = subs(dsk, sub)
            # Change new input_indices to match the given index from the current computation
# [('c', j')] -> [('c', 'i')]
new_indices = inputs[dep].indices
sub = dict(zip(inputs[dep].output_indices, current_dep_indices))
contracted = {x for _, j in new_indices
if j is not None
for x in j
if x not in inputs[dep].output_indices}
extra = dict(zip(contracted, new_index_iter))
sub.update(extra)
new_indices = [(x, index_subs(j, sub)) for x, j in new_indices]
# Update new_axes
for k, v in inputs[dep].new_axes.items():
new_axes[sub[k]] = v
# Bump new inputs up in list
sub = {}
for i, index in enumerate(new_indices):
try:
contains = index in indices
except (ValueError, TypeError):
contains = False
if contains: # use old inputs if available
sub[atop_token(i)] = atop_token(indices.index(index))
else:
sub[atop_token(i)] = atop_token(len(indices))
indices.append(index)
new_dsk = subs(inputs[dep].dsk, sub)
# indices.extend(new_indices)
dsk.update(new_dsk)
indices = [(a, tuple(b) if isinstance(b, list) else b)
for a, b in indices]
# De-duplicate indices like [(a, ij), (b, i), (a, ij)] -> [(a, ij), (b, i)]
# Make sure that we map everything else appropriately as we remove inputs
new_indices = []
seen = {}
sub = {} # like {_0: _0, _1: _0, _2: _1}
for i, x in enumerate(indices):
if x[1] is not None and x in seen:
sub[i] = seen[x]
else:
if x[1] is not None:
seen[x] = len(new_indices)
sub[i] = len(new_indices)
new_indices.append(x)
sub = {atop_token(k): atop_token(v) for k, v in sub.items()}
dsk = {k: subs(v, sub) for k, v in dsk.items()}
indices_check = {k for k, v in indices if v is not None}
numblocks = toolz.merge([inp.numblocks for inp in inputs.values()])
numblocks = {k: v for k, v in numblocks.items()
if v is None or k in indices_check}
out = TOP(root, inputs[root].output_indices, dsk, new_indices,
numblocks=numblocks, new_axes=new_axes, concatenate=concatenate)
return out
| 36.308901 | 107 | 0.548594 | import itertools
import numbers
import numpy as np
import toolz
from .. import base, core, sharedict, utils
from ..compatibility import apply, Mapping
from ..delayed import to_task_dask
from ..optimization import SubgraphCallable
def subs(task, substitution):
if isinstance(task, dict):
return {k: subs(v, substitution) for k, v in task.items()}
if type(task) in (tuple, list, set):
return type(task)([subs(x, substitution) for x in task])
try:
return substitution[task]
except (KeyError, TypeError):
return task
def index_subs(ind, substitution):
if ind is None:
return ind
else:
return tuple([substitution.get(c, c) for c in ind])
def atop_token(i, prefix='_'):
return prefix + '%d' % i
def _top(func, output, output_indices, *arrind_pairs, **kwargs):
numblocks = kwargs.pop('numblocks')
concatenate = kwargs.pop('concatenate', None)
new_axes = kwargs.pop('new_axes', {})
graph = sharedict.ShareDict()
arrind_pairs = list(arrind_pairs)
unique_indices = {i for ii in arrind_pairs[1::2]
if ii is not None
for i in ii} | set(output_indices)
sub = {k: atop_token(i, '.')
for i, k in enumerate(sorted(unique_indices))}
output_indices = index_subs(tuple(output_indices), sub)
arrind_pairs[1::2] = [tuple(a) if a is not None else a
for a in arrind_pairs[1::2]]
arrind_pairs[1::2] = [index_subs(a, sub)
for a in arrind_pairs[1::2]]
new_axes = {index_subs((k,), sub)[0]: v for k, v in new_axes.items()}
argpairs = list(toolz.partition(2, arrind_pairs))
for i, (arg, ind) in enumerate(argpairs):
if ind is None:
arg2, dsk2 = to_task_dask(arg)
if dsk2:
graph.update(dsk2)
argpairs[i] = (arg2, ind)
inputs = tuple([name for name, _ in argpairs])
inputs_indices = tuple([index for _, index in argpairs])
if kwargs:
kwargs, dsk_kwargs = to_task_dask(kwargs)
new_keys = list(core.get_dependencies(dsk_kwargs, task=kwargs))
new_tokens = tuple(atop_token(i) for i in range(len(inputs), len(inputs) + len(new_keys)))
sub = dict(zip(new_keys, new_tokens))
inputs = inputs + tuple(new_keys)
inputs_indices = inputs_indices + (None,) * len(new_keys)
kwargs = subs(kwargs, sub)
graph.update(dsk_kwargs)
indices = [(k, v) for k, v in zip(inputs, inputs_indices)]
keys = tuple(map(atop_token, range(len(inputs))))
if not kwargs:
dsk = {output: (func,) + keys}
else:
_keys = list(keys)
if new_keys:
_keys = _keys[:-len(new_keys)]
dsk = {output: (apply, func, _keys, kwargs)}
top = TOP(output, output_indices, dsk, indices,
numblocks=numblocks, concatenate=concatenate, new_axes=new_axes)
graph.update_with_key(top, output)
graph.dependencies = {output: {arg for arg, ind in argpairs if ind is not None}}
return graph
class TOP(Mapping):
def __init__(self, output, output_indices, dsk, indices,
numblocks, concatenate=None, new_axes=None):
self.output = output
self.output_indices = tuple(output_indices)
self.dsk = dsk
self.indices = tuple((name, tuple(ind) if ind is not None else ind)
for name, ind in indices)
self.numblocks = numblocks
self.concatenate = concatenate
self.new_axes = new_axes or {}
@property
def _dict(self):
if hasattr(self, '_cached_dict'):
return self._cached_dict
else:
keys = tuple(map(atop_token, range(len(self.indices))))
func = SubgraphCallable(self.dsk, self.output, keys)
self._cached_dict = top(
func,
self.output,
self.output_indices,
*list(toolz.concat(self.indices)),
new_axes=self.new_axes,
numblocks=self.numblocks,
concatenate=self.concatenate
)
return self._cached_dict
def __getitem__(self, key):
return self._dict[key]
def __iter__(self):
return iter(self._dict)
def __len__(self):
return int(np.prod(list(self._out_numblocks().values())))
def _out_numblocks(self):
d = {}
indices = {k: v for k, v in self.indices if v is not None}
for k, v in self.numblocks.items():
for a, b in zip(indices[k], v):
d[a] = max(d.get(a, 0), b)
return {k: v for k, v in d.items() if k in self.output_indices}
def top(func, output, out_indices, *arrind_pairs, **kwargs):
from .core import broadcast_dimensions, zero_broadcast_dimensions, concatenate_axes
numblocks = kwargs.pop('numblocks')
concatenate = kwargs.pop('concatenate', None)
new_axes = kwargs.pop('new_axes', {})
argpairs = list(toolz.partition(2, arrind_pairs))
assert set(numblocks) == {name for name, ind in argpairs if ind is not None}
all_indices = {x for _, ind in argpairs if ind for x in ind}
dummy_indices = all_indices - set(out_indices)
dims = broadcast_dimensions(argpairs, numblocks)
for k in new_axes:
dims[k] = 1
keytups = list(itertools.product(*[range(dims[i]) for i in out_indices]))
keydicts = [dict(zip(out_indices, tup)) for tup in keytups]
dummies = dict((i, list(range(dims[i]))) for i in dummy_indices)
dsk = {}
valtups = []
for kd in keydicts:
args = []
for arg, ind in argpairs:
if ind is None:
args.append(arg)
else:
tups = lol_tuples((arg,), ind, kd, dummies)
if any(nb == 1 for nb in numblocks[arg]):
tups2 = zero_broadcast_dimensions(tups, numblocks[arg])
else:
tups2 = tups
if concatenate and isinstance(tups2, list):
axes = [n for n, i in enumerate(ind) if i in dummies]
tups2 = (concatenate_axes, tups2, axes)
args.append(tups2)
valtups.append(args)
if not kwargs:
valtups = [tuple(vt) for vt in valtups]
keys = [(output,) + kt for kt in keytups]
if kwargs:
task, dsk2 = to_task_dask(kwargs)
if dsk2:
dsk.update(utils.ensure_dict(dsk2))
kwargs2 = task
else:
kwargs2 = kwargs
vals = [(apply, func, vt, kwargs2) for vt in valtups]
else:
vals = [(func,) + vt for vt in valtups]
dsk.update(dict(zip(keys, vals)))
return dsk
def atop(func, out_ind, *args, **kwargs):
out = kwargs.pop('name', None)
token = kwargs.pop('token', None)
dtype = kwargs.pop('dtype', None)
adjust_chunks = kwargs.pop('adjust_chunks', None)
new_axes = kwargs.get('new_axes', {})
if len(set(out_ind)) != len(out_ind):
raise ValueError("Repeated elements not allowed in output index",
[k for k, v in toolz.frequencies(out_ind).items() if v > 1])
new = (set(out_ind)
- {a for arg in args[1::2] if arg is not None for a in arg}
- set(new_axes or ()))
if new:
raise ValueError("Unknown dimension", new)
from .core import Array, unify_chunks, normalize_arg
if dtype is None:
raise ValueError("Must specify dtype of output array")
chunkss, arrays = unify_chunks(*args)
for k, v in new_axes.items():
chunkss[k] = (v,)
arginds = list(zip(arrays, args[1::2]))
for arg, ind in arginds:
if hasattr(arg, 'ndim') and hasattr(ind, '__len__') and arg.ndim != len(ind):
raise ValueError("Index string %s does not match array dimension %d"
% (ind, arg.ndim))
numblocks = {a.name: a.numblocks for a, ind in arginds if ind is not None}
argindsstr = list(toolz.concat([(normalize_arg(a) if ind is None else a.name, ind)
for a, ind in arginds]))
if not out:
out = '%s-%s' % (token or utils.funcname(func).strip('_'),
base.tokenize(func, out_ind, argindsstr, dtype, **kwargs))
kwargs2 = {k: normalize_arg(v) for k, v in kwargs.items()}
dsk = _top(func, out, out_ind, *argindsstr, numblocks=numblocks, **kwargs2)
dsks = [a.dask for a, ind in arginds if ind is not None]
chunks = [chunkss[i] for i in out_ind]
if adjust_chunks:
for i, ind in enumerate(out_ind):
if ind in adjust_chunks:
if callable(adjust_chunks[ind]):
chunks[i] = tuple(map(adjust_chunks[ind], chunks[i]))
elif isinstance(adjust_chunks[ind], numbers.Integral):
chunks[i] = tuple(adjust_chunks[ind] for _ in chunks[i])
elif isinstance(adjust_chunks[ind], (tuple, list)):
chunks[i] = tuple(adjust_chunks[ind])
else:
raise NotImplementedError(
"adjust_chunks values must be callable, int, or tuple")
chunks = tuple(chunks)
return Array(sharedict.merge((out, dsk), *dsks,
dependencies={out: {a.name for a, ind in arginds if ind is not None}}),
out, chunks, dtype=dtype)
def lol_tuples(head, ind, values, dummies):
if not ind:
return head
if ind[0] not in dummies:
return lol_tuples(head + (values[ind[0]],), ind[1:], values, dummies)
else:
return [lol_tuples(head + (v,), ind[1:], values, dummies)
for v in dummies[ind[0]]]
def optimize_atop(full_graph, keys=()):
keep = {k[0] if type(k) is tuple else k for k in keys}
layers = full_graph.dicts
dependents = core.reverse_dict(full_graph.dependencies)
roots = {k for k in full_graph.dicts
if not dependents.get(k)}
stack = list(roots)
out = {}
dependencies = {}
seen = set()
while stack:
layer = stack.pop()
if layer in seen or layer not in layers:
continue
seen.add(layer)
if isinstance(layers[layer], TOP):
top_layers = {layer}
deps = set(top_layers)
while deps:
dep = deps.pop()
if dep not in layers:
stack.append(dep)
continue
if not isinstance(layers[dep], TOP):
stack.append(dep)
continue
if (dep != layer and dep in keep):
stack.append(dep)
continue
if layers[dep].concatenate != layers[layer].concatenate:
stack.append(dep)
continue
top_layers.add(dep)
for d in full_graph.dependencies.get(dep, ()):
# Don't allow reductions to proceed
output_indices = set(layers[dep].output_indices)
input_indices = {i for _, ind in layers[dep].indices if ind for i in ind}
if len(dependents[d]) <= 1 and output_indices.issuperset(input_indices):
deps.add(d)
else:
stack.append(d)
new_layer = rewrite_atop([layers[l] for l in top_layers])
out[layer] = new_layer
dependencies[layer] = {k for k, v in new_layer.indices if v is not None}
else:
out[layer] = layers[layer]
dependencies[layer] = full_graph.dependencies.get(layer, set())
stack.extend(full_graph.dependencies.get(layer, ()))
return sharedict.ShareDict(out, dependencies)
def rewrite_atop(inputs):
inputs = {inp.output: inp for inp in inputs}
dependencies = {inp.output: {d for d, v in inp.indices
if v is not None and d in inputs}
for inp in inputs.values()}
dependents = core.reverse_dict(dependencies)
new_index_iter = (c + (str(d) if d else '')
for d in itertools.count()
for c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
[root] = [k for k, v in dependents.items() if not v]
indices = list(inputs[root].indices)
new_axes = inputs[root].new_axes
concatenate = inputs[root].concatenate
dsk = dict(inputs[root].dsk)
changed = True
while changed:
changed = False
for i, (dep, ind) in enumerate(indices):
if ind is None:
continue
if dep not in inputs:
continue
changed = True
dsk = {k: subs(v, {atop_token(i): dep}) for k, v in dsk.items()}
_, current_dep_indices = indices.pop(i)
sub = {atop_token(i): atop_token(i - 1) for i in range(i + 1, len(indices) + 1)}
dsk = subs(dsk, sub)
new_indices = inputs[dep].indices
sub = dict(zip(inputs[dep].output_indices, current_dep_indices))
contracted = {x for _, j in new_indices
if j is not None
for x in j
if x not in inputs[dep].output_indices}
extra = dict(zip(contracted, new_index_iter))
sub.update(extra)
new_indices = [(x, index_subs(j, sub)) for x, j in new_indices]
# Update new_axes
for k, v in inputs[dep].new_axes.items():
new_axes[sub[k]] = v
# Bump new inputs up in list
sub = {}
for i, index in enumerate(new_indices):
try:
contains = index in indices
except (ValueError, TypeError):
contains = False
if contains: # use old inputs if available
sub[atop_token(i)] = atop_token(indices.index(index))
else:
sub[atop_token(i)] = atop_token(len(indices))
indices.append(index)
new_dsk = subs(inputs[dep].dsk, sub)
# indices.extend(new_indices)
dsk.update(new_dsk)
indices = [(a, tuple(b) if isinstance(b, list) else b)
for a, b in indices]
# De-duplicate indices like [(a, ij), (b, i), (a, ij)] -> [(a, ij), (b, i)]
# Make sure that we map everything else appropriately as we remove inputs
new_indices = []
seen = {}
sub = {} # like {_0: _0, _1: _0, _2: _1}
for i, x in enumerate(indices):
if x[1] is not None and x in seen:
sub[i] = seen[x]
else:
if x[1] is not None:
seen[x] = len(new_indices)
sub[i] = len(new_indices)
new_indices.append(x)
sub = {atop_token(k): atop_token(v) for k, v in sub.items()}
dsk = {k: subs(v, sub) for k, v in dsk.items()}
indices_check = {k for k, v in indices if v is not None}
numblocks = toolz.merge([inp.numblocks for inp in inputs.values()])
numblocks = {k: v for k, v in numblocks.items()
if v is None or k in indices_check}
out = TOP(root, inputs[root].output_indices, dsk, new_indices,
numblocks=numblocks, new_axes=new_axes, concatenate=concatenate)
return out
| true | true |
f71f58c1649fd2690611e738744d6c22a955fdf0 | 4,419 | py | Python | sherpa_client/models/http_service_metadata.py | kairntech/sherpa-client | cd259c87b7291eeec3f3ea025e368f2f069a06cd | [
"Apache-2.0"
] | null | null | null | sherpa_client/models/http_service_metadata.py | kairntech/sherpa-client | cd259c87b7291eeec3f3ea025e368f2f069a06cd | [
"Apache-2.0"
] | null | null | null | sherpa_client/models/http_service_metadata.py | kairntech/sherpa-client | cd259c87b7291eeec3f3ea025e368f2f069a06cd | [
"Apache-2.0"
] | null | null | null | from typing import Any, Dict, Type, TypeVar, Union
import attr
from ..models.http_service_metadata_operations import HttpServiceMetadataOperations
from ..types import UNSET, Unset
T = TypeVar("T", bound="HttpServiceMetadata")
@attr.s(auto_attribs=True)
class HttpServiceMetadata:
""" """
api: str
compatibility: str
version: str
annotators: Union[Unset, str] = UNSET
converters: Union[Unset, str] = UNSET
engine: Union[Unset, str] = UNSET
extensions: Union[Unset, str] = UNSET
formatters: Union[Unset, str] = UNSET
functions: Union[Unset, str] = UNSET
languages: Union[Unset, str] = UNSET
natures: Union[Unset, str] = UNSET
operations: Union[Unset, HttpServiceMetadataOperations] = UNSET
processors: Union[Unset, str] = UNSET
term_importers: Union[Unset, str] = UNSET
trigger: Union[Unset, str] = UNSET
def to_dict(self) -> Dict[str, Any]:
api = self.api
compatibility = self.compatibility
version = self.version
annotators = self.annotators
converters = self.converters
engine = self.engine
extensions = self.extensions
formatters = self.formatters
functions = self.functions
languages = self.languages
natures = self.natures
operations: Union[Unset, Dict[str, Any]] = UNSET
if not isinstance(self.operations, Unset):
operations = self.operations.to_dict()
processors = self.processors
term_importers = self.term_importers
trigger = self.trigger
field_dict: Dict[str, Any] = {}
field_dict.update(
{
"api": api,
"compatibility": compatibility,
"version": version,
}
)
if annotators is not UNSET:
field_dict["annotators"] = annotators
if converters is not UNSET:
field_dict["converters"] = converters
if engine is not UNSET:
field_dict["engine"] = engine
if extensions is not UNSET:
field_dict["extensions"] = extensions
if formatters is not UNSET:
field_dict["formatters"] = formatters
if functions is not UNSET:
field_dict["functions"] = functions
if languages is not UNSET:
field_dict["languages"] = languages
if natures is not UNSET:
field_dict["natures"] = natures
if operations is not UNSET:
field_dict["operations"] = operations
if processors is not UNSET:
field_dict["processors"] = processors
if term_importers is not UNSET:
field_dict["termImporters"] = term_importers
if trigger is not UNSET:
field_dict["trigger"] = trigger
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
api = d.pop("api")
compatibility = d.pop("compatibility")
version = d.pop("version")
annotators = d.pop("annotators", UNSET)
converters = d.pop("converters", UNSET)
engine = d.pop("engine", UNSET)
extensions = d.pop("extensions", UNSET)
formatters = d.pop("formatters", UNSET)
functions = d.pop("functions", UNSET)
languages = d.pop("languages", UNSET)
natures = d.pop("natures", UNSET)
_operations = d.pop("operations", UNSET)
operations: Union[Unset, HttpServiceMetadataOperations]
if isinstance(_operations, Unset):
operations = UNSET
else:
operations = HttpServiceMetadataOperations.from_dict(_operations)
processors = d.pop("processors", UNSET)
term_importers = d.pop("termImporters", UNSET)
trigger = d.pop("trigger", UNSET)
http_service_metadata = cls(
api=api,
compatibility=compatibility,
version=version,
annotators=annotators,
converters=converters,
engine=engine,
extensions=extensions,
formatters=formatters,
functions=functions,
languages=languages,
natures=natures,
operations=operations,
processors=processors,
term_importers=term_importers,
trigger=trigger,
)
return http_service_metadata
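# Editor's illustration of the round trip above; the field values are made up.
# Optional attributes stay UNSET, so ``to_dict`` emits only the three required keys
# and ``from_dict`` restores an equal instance.
def _http_service_metadata_example() -> None:
    meta = HttpServiceMetadata(api="converter", compatibility="1.0", version="2.3.1")
    payload = meta.to_dict()
    assert payload == {"api": "converter", "compatibility": "1.0", "version": "2.3.1"}
    assert HttpServiceMetadata.from_dict(payload) == meta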
| 30.902098 | 83 | 0.604435 | from typing import Any, Dict, Type, TypeVar, Union
import attr
from ..models.http_service_metadata_operations import HttpServiceMetadataOperations
from ..types import UNSET, Unset
T = TypeVar("T", bound="HttpServiceMetadata")
@attr.s(auto_attribs=True)
class HttpServiceMetadata:
api: str
compatibility: str
version: str
annotators: Union[Unset, str] = UNSET
converters: Union[Unset, str] = UNSET
engine: Union[Unset, str] = UNSET
extensions: Union[Unset, str] = UNSET
formatters: Union[Unset, str] = UNSET
functions: Union[Unset, str] = UNSET
languages: Union[Unset, str] = UNSET
natures: Union[Unset, str] = UNSET
operations: Union[Unset, HttpServiceMetadataOperations] = UNSET
processors: Union[Unset, str] = UNSET
term_importers: Union[Unset, str] = UNSET
trigger: Union[Unset, str] = UNSET
def to_dict(self) -> Dict[str, Any]:
api = self.api
compatibility = self.compatibility
version = self.version
annotators = self.annotators
converters = self.converters
engine = self.engine
extensions = self.extensions
formatters = self.formatters
functions = self.functions
languages = self.languages
natures = self.natures
operations: Union[Unset, Dict[str, Any]] = UNSET
if not isinstance(self.operations, Unset):
operations = self.operations.to_dict()
processors = self.processors
term_importers = self.term_importers
trigger = self.trigger
field_dict: Dict[str, Any] = {}
field_dict.update(
{
"api": api,
"compatibility": compatibility,
"version": version,
}
)
if annotators is not UNSET:
field_dict["annotators"] = annotators
if converters is not UNSET:
field_dict["converters"] = converters
if engine is not UNSET:
field_dict["engine"] = engine
if extensions is not UNSET:
field_dict["extensions"] = extensions
if formatters is not UNSET:
field_dict["formatters"] = formatters
if functions is not UNSET:
field_dict["functions"] = functions
if languages is not UNSET:
field_dict["languages"] = languages
if natures is not UNSET:
field_dict["natures"] = natures
if operations is not UNSET:
field_dict["operations"] = operations
if processors is not UNSET:
field_dict["processors"] = processors
if term_importers is not UNSET:
field_dict["termImporters"] = term_importers
if trigger is not UNSET:
field_dict["trigger"] = trigger
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
api = d.pop("api")
compatibility = d.pop("compatibility")
version = d.pop("version")
annotators = d.pop("annotators", UNSET)
converters = d.pop("converters", UNSET)
engine = d.pop("engine", UNSET)
extensions = d.pop("extensions", UNSET)
formatters = d.pop("formatters", UNSET)
functions = d.pop("functions", UNSET)
languages = d.pop("languages", UNSET)
natures = d.pop("natures", UNSET)
_operations = d.pop("operations", UNSET)
operations: Union[Unset, HttpServiceMetadataOperations]
if isinstance(_operations, Unset):
operations = UNSET
else:
operations = HttpServiceMetadataOperations.from_dict(_operations)
processors = d.pop("processors", UNSET)
term_importers = d.pop("termImporters", UNSET)
trigger = d.pop("trigger", UNSET)
http_service_metadata = cls(
api=api,
compatibility=compatibility,
version=version,
annotators=annotators,
converters=converters,
engine=engine,
extensions=extensions,
formatters=formatters,
functions=functions,
languages=languages,
natures=natures,
operations=operations,
processors=processors,
term_importers=term_importers,
trigger=trigger,
)
return http_service_metadata
| true | true |
f71f5c880c576a98b3a2c7865445d8aef1babbe3 | 5,734 | py | Python | ivy/func_wrapper.py | sert121/ivy | 286f86e487b0c83d46a3ef8d30aa96316337db32 | [
"Apache-2.0"
] | 1 | 2022-02-15T02:07:07.000Z | 2022-02-15T02:07:07.000Z | ivy/func_wrapper.py | sert121/ivy | 286f86e487b0c83d46a3ef8d30aa96316337db32 | [
"Apache-2.0"
] | null | null | null | ivy/func_wrapper.py | sert121/ivy | 286f86e487b0c83d46a3ef8d30aa96316337db32 | [
"Apache-2.0"
] | null | null | null | import ivy
import inspect
import importlib
import numpy as np
from types import ModuleType
wrapped_modules_n_classes = []
NON_WRAPPED_METHODS = ['current_framework', 'current_framework_str', 'set_framework', 'get_framework',
'unset_framework', 'set_debug_mode', 'set_breakpoint_debug_mode', 'set_exception_debug_mode',
'unset_debug_mode', 'debug_mode', 'nested_map', 'to_ivy', 'args_to_ivy', 'to_native',
'args_to_native', 'default', 'exists', 'set_min_base', 'get_min_base', 'set_min_denominator',
'get_min_denominator', 'split_func_call_across_gpus', 'cache_fn', 'split_func_call',
'compile', 'compile_graph', 'dev', 'dev', 'dev_to_str', 'dev_from_str', 'memory_on_dev',
'gpu_is_available', 'num_gpus', 'tpu_is_available', 'dtype', 'dtype_to_str', 'cprint',
'to_ivy_module', 'tree_flatten', 'tree_unflatten', 'start_compiling', 'stop_compiling',
'get_compiled', 'index_nest', 'set_nest_at_index', 'map_nest_at_index', 'multi_index_nest',
'set_nest_at_indices', 'map_nest_at_indices', 'nested_indices_where', 'map',
'unset_default_device', 'closest_valid_dtype', 'default_dtype', 'dtype_from_str']
ARRAYLESS_RET_METHODS = ['to_numpy', 'to_list', 'to_scalar', 'shape', 'get_num_dims', 'is_array', 'is_variable']
NESTED_ARRAY_RET_METHODS = ['unstack', 'split']
FW_FN_KEYWORDS = {'numpy': [],
'jax': [],
'tensorflow': [],
'torch': [],
'mxnet': ['ndarray']}
NATIVE_KEYS_TO_SKIP = {'numpy': [],
'jax': [],
'tensorflow': [],
'torch': ['classes', 'torch', 'is_grad_enabled', 'get_default_dtype', 'numel', 'clone', 'cpu',
'set_', 'type', 'requires_grad_'],
'mxnet': []}
# Methods #
def _wrap_method(fn):
if hasattr(fn, '__name__') and (fn.__name__[0] == '_' or fn.__name__ in NON_WRAPPED_METHODS):
return fn
if hasattr(fn, 'wrapped') and fn.wrapped:
return fn
def _method_wrapped(*args, **kwargs):
native_args, native_kwargs = ivy.args_to_native(*args, **kwargs)
native_ret = fn(*native_args, **native_kwargs)
if fn.__name__ in ARRAYLESS_RET_METHODS + NESTED_ARRAY_RET_METHODS:
return native_ret
return ivy.to_ivy(native_ret, nested=True)
if hasattr(fn, '__name__'):
_method_wrapped.__name__ = fn.__name__
_method_wrapped.wrapped = True
_method_wrapped.inner_fn = fn
return _method_wrapped
def _unwrap_method(method_wrapped):
if not hasattr(method_wrapped, 'wrapped') or not method_wrapped.wrapped:
return method_wrapped
return method_wrapped.inner_fn
def _invalid_fn(fn, fs=None):
if fs is None:
fs = ivy.current_framework_str()
if isinstance(fn, np.ufunc):
return False
if not hasattr(fn, '__module__') or not fn.__module__:
return True
fw_fn_keywords = ['ivy', fs] + FW_FN_KEYWORDS[fs]
for kw in fw_fn_keywords:
if kw in fn.__module__:
return False
return True
def _wrap_or_unwrap_methods(wrap_or_unwrap_fn, val=None, fs=None, classes_to_wrap=None, native=False, depth=0):
classes_to_wrap = [] if classes_to_wrap is None else classes_to_wrap
if fs is None:
fs = ivy.current_framework_str()
if val is None:
val = importlib.import_module(ivy.current_framework_str()) if native else ivy
str_to_check = fs if native else 'ivy'
is_class = inspect.isclass(val)
if isinstance(val, ModuleType) or (val in classes_to_wrap):
if val in wrapped_modules_n_classes or (('__file__' not in val.__dict__ or
(str_to_check not in val.__file__) or 'framework_handler' in val.__file__) and not is_class):
return val
wrapped_modules_n_classes.append(val)
if is_class:
for k in dir(val):
if native and (k in NATIVE_KEYS_TO_SKIP[fs]):
continue
v = getattr(val, k)
if v is not None:
# noinspection PyBroadException
try:
setattr(val, k, _wrap_or_unwrap_methods(
wrap_or_unwrap_fn, v, fs, classes_to_wrap, native, depth + 1))
except Exception:
pass
else:
for k, v in val.__dict__.items():
if native and (k in NATIVE_KEYS_TO_SKIP[fs] or k[0] == '_'):
continue
if v is None:
val.__dict__[k] = v
else:
# noinspection PyBroadException
try:
val.__dict__[k] = _wrap_or_unwrap_methods(
wrap_or_unwrap_fn, v, fs, classes_to_wrap, native, depth + 1)
except Exception:
pass
if depth == 0:
wrapped_modules_n_classes.clear()
return val
elif callable(val) and not is_class:
if depth == 0:
wrapped_modules_n_classes.clear()
if (hasattr(val, 'inner_fn') and (_invalid_fn(val.inner_fn) and not native))\
or (_invalid_fn(val) and not native):
return val
return wrap_or_unwrap_fn(val)
if depth == 0:
wrapped_modules_n_classes.clear()
return val
def _wrap_methods():
return _wrap_or_unwrap_methods(_wrap_method)
def _unwrap_methods():
return _wrap_or_unwrap_methods(_unwrap_method)
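# Editor's sketch of the wrap/unwrap mechanics above; ``double`` is a made-up
# stand-in for a backend function and is not part of ivy.  Calling the wrapper is
# not shown because that requires a framework to be set first.
def _wrap_unwrap_example() -> None:
    def double(x):
        return x * 2
    wrapped = _wrap_method(double)
    # The wrapper keeps the original callable and marks itself as wrapped, so
    # wrapping twice is a no-op and unwrapping returns the untouched function.
    assert wrapped.wrapped is True and wrapped.inner_fn is double
    assert _wrap_method(wrapped) is wrapped
    assert _unwrap_method(wrapped) is double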
| 40.380282 | 117 | 0.592431 | import ivy
import inspect
import importlib
import numpy as np
from types import ModuleType
wrapped_modules_n_classes = []
NON_WRAPPED_METHODS = ['current_framework', 'current_framework_str', 'set_framework', 'get_framework',
'unset_framework', 'set_debug_mode', 'set_breakpoint_debug_mode', 'set_exception_debug_mode',
'unset_debug_mode', 'debug_mode', 'nested_map', 'to_ivy', 'args_to_ivy', 'to_native',
'args_to_native', 'default', 'exists', 'set_min_base', 'get_min_base', 'set_min_denominator',
'get_min_denominator', 'split_func_call_across_gpus', 'cache_fn', 'split_func_call',
'compile', 'compile_graph', 'dev', 'dev', 'dev_to_str', 'dev_from_str', 'memory_on_dev',
'gpu_is_available', 'num_gpus', 'tpu_is_available', 'dtype', 'dtype_to_str', 'cprint',
'to_ivy_module', 'tree_flatten', 'tree_unflatten', 'start_compiling', 'stop_compiling',
'get_compiled', 'index_nest', 'set_nest_at_index', 'map_nest_at_index', 'multi_index_nest',
'set_nest_at_indices', 'map_nest_at_indices', 'nested_indices_where', 'map',
'unset_default_device', 'closest_valid_dtype', 'default_dtype', 'dtype_from_str']
ARRAYLESS_RET_METHODS = ['to_numpy', 'to_list', 'to_scalar', 'shape', 'get_num_dims', 'is_array', 'is_variable']
NESTED_ARRAY_RET_METHODS = ['unstack', 'split']
FW_FN_KEYWORDS = {'numpy': [],
'jax': [],
'tensorflow': [],
'torch': [],
'mxnet': ['ndarray']}
NATIVE_KEYS_TO_SKIP = {'numpy': [],
'jax': [],
'tensorflow': [],
'torch': ['classes', 'torch', 'is_grad_enabled', 'get_default_dtype', 'numel', 'clone', 'cpu',
'set_', 'type', 'requires_grad_'],
'mxnet': []}
def _wrap_method(fn):
if hasattr(fn, '__name__') and (fn.__name__[0] == '_' or fn.__name__ in NON_WRAPPED_METHODS):
return fn
if hasattr(fn, 'wrapped') and fn.wrapped:
return fn
def _method_wrapped(*args, **kwargs):
native_args, native_kwargs = ivy.args_to_native(*args, **kwargs)
native_ret = fn(*native_args, **native_kwargs)
if fn.__name__ in ARRAYLESS_RET_METHODS + NESTED_ARRAY_RET_METHODS:
return native_ret
return ivy.to_ivy(native_ret, nested=True)
if hasattr(fn, '__name__'):
_method_wrapped.__name__ = fn.__name__
_method_wrapped.wrapped = True
_method_wrapped.inner_fn = fn
return _method_wrapped
def _unwrap_method(method_wrapped):
if not hasattr(method_wrapped, 'wrapped') or not method_wrapped.wrapped:
return method_wrapped
return method_wrapped.inner_fn
def _invalid_fn(fn, fs=None):
if fs is None:
fs = ivy.current_framework_str()
if isinstance(fn, np.ufunc):
return False
if not hasattr(fn, '__module__') or not fn.__module__:
return True
fw_fn_keywords = ['ivy', fs] + FW_FN_KEYWORDS[fs]
for kw in fw_fn_keywords:
if kw in fn.__module__:
return False
return True
def _wrap_or_unwrap_methods(wrap_or_unwrap_fn, val=None, fs=None, classes_to_wrap=None, native=False, depth=0):
classes_to_wrap = [] if classes_to_wrap is None else classes_to_wrap
if fs is None:
fs = ivy.current_framework_str()
if val is None:
val = importlib.import_module(ivy.current_framework_str()) if native else ivy
str_to_check = fs if native else 'ivy'
is_class = inspect.isclass(val)
if isinstance(val, ModuleType) or (val in classes_to_wrap):
if val in wrapped_modules_n_classes or (('__file__' not in val.__dict__ or
(str_to_check not in val.__file__) or 'framework_handler' in val.__file__) and not is_class):
return val
wrapped_modules_n_classes.append(val)
if is_class:
for k in dir(val):
if native and (k in NATIVE_KEYS_TO_SKIP[fs]):
continue
v = getattr(val, k)
if v is not None:
try:
setattr(val, k, _wrap_or_unwrap_methods(
wrap_or_unwrap_fn, v, fs, classes_to_wrap, native, depth + 1))
except Exception:
pass
else:
for k, v in val.__dict__.items():
if native and (k in NATIVE_KEYS_TO_SKIP[fs] or k[0] == '_'):
continue
if v is None:
val.__dict__[k] = v
else:
try:
val.__dict__[k] = _wrap_or_unwrap_methods(
wrap_or_unwrap_fn, v, fs, classes_to_wrap, native, depth + 1)
except Exception:
pass
if depth == 0:
wrapped_modules_n_classes.clear()
return val
elif callable(val) and not is_class:
if depth == 0:
wrapped_modules_n_classes.clear()
if (hasattr(val, 'inner_fn') and (_invalid_fn(val.inner_fn) and not native))\
or (_invalid_fn(val) and not native):
return val
return wrap_or_unwrap_fn(val)
if depth == 0:
wrapped_modules_n_classes.clear()
return val
def _wrap_methods():
return _wrap_or_unwrap_methods(_wrap_method)
def _unwrap_methods():
return _wrap_or_unwrap_methods(_unwrap_method)
| true | true |
f71f5cbb0f82e3b460895dc04351f46514cc35da | 1,549 | py | Python | idb/client/pid_saver.py | fakeNetflix/facebook-repo-idb | eb4ed5a7dc4a14b224a22e833294d7366fe4725e | [
"MIT"
] | 1 | 2021-03-09T07:29:18.000Z | 2021-03-09T07:29:18.000Z | idb/client/pid_saver.py | fakeNetflix/facebook-repo-idb | eb4ed5a7dc4a14b224a22e833294d7366fe4725e | [
"MIT"
] | 6 | 2021-05-10T08:32:56.000Z | 2022-02-26T01:41:09.000Z | idb/client/pid_saver.py | fakeNetflix/facebook-repo-idb | eb4ed5a7dc4a14b224a22e833294d7366fe4725e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import json
import logging
import os
import signal
from typing import List
from idb.common.constants import IDB_PID_PATH
def save_pid(pid: int) -> None:
pids = _get_pids()
pids.append(pid)
_write_pids(pids=pids)
logging.debug(f"saved daemon pid {pid}")
def remove_pid(pid: int) -> None:
pids = _get_pids()
if pids.count(pid) > 0:
pids.remove(pid)
_write_pids(pids=pids)
logging.debug(f"removed daemon pid {pid}")
def _write_pids(pids: List[int]) -> None:
with open(IDB_PID_PATH, "w") as pid_file:
json.dump(pids, pid_file)
pid_file.flush()
def _has_saved_pids() -> bool:
pids = _get_pids()
logging.debug(f"has saved pids {pids}")
return len(pids) > 0
def _get_pids() -> List[int]:
try:
with open(IDB_PID_PATH) as pid_file:
return json.load(pid_file)
except Exception:
return []
def _clear_saved_pids() -> None:
if os.path.exists(IDB_PID_PATH):
# Empty the file
with open(IDB_PID_PATH, "wb", buffering=0) as pid_file:
pid_file.flush()
async def kill_saved_pids() -> None:
if not _has_saved_pids():
logging.debug(f"no daemon pid found")
return
for pid in _get_pids():
try:
os.kill(pid, signal.SIGTERM)
logging.info(f"stopped daemon with pid {pid}")
        except (OSError, ProcessLookupError):
pass
_clear_saved_pids()
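# Editor's sketch of the intended save/remove flow; defined only and never called on
# import, because these helpers really do read and write IDB_PID_PATH on disk.  It
# assumes the current pid was not already recorded.
def _pid_saver_example() -> None:
    pid = os.getpid()
    save_pid(pid)                    # appended to the JSON list on disk
    assert pid in _get_pids()
    remove_pid(pid)                  # removed again, other recorded pids are kept
    assert pid not in _get_pids()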
| 23.469697 | 71 | 0.632666 |
import json
import logging
import os
import signal
from typing import List
from idb.common.constants import IDB_PID_PATH
def save_pid(pid: int) -> None:
pids = _get_pids()
pids.append(pid)
_write_pids(pids=pids)
logging.debug(f"saved daemon pid {pid}")
def remove_pid(pid: int) -> None:
pids = _get_pids()
if pids.count(pid) > 0:
pids.remove(pid)
_write_pids(pids=pids)
logging.debug(f"removed daemon pid {pid}")
def _write_pids(pids: List[int]) -> None:
with open(IDB_PID_PATH, "w") as pid_file:
json.dump(pids, pid_file)
pid_file.flush()
def _has_saved_pids() -> bool:
pids = _get_pids()
logging.debug(f"has saved pids {pids}")
return len(pids) > 0
def _get_pids() -> List[int]:
try:
with open(IDB_PID_PATH) as pid_file:
return json.load(pid_file)
except Exception:
return []
def _clear_saved_pids() -> None:
if os.path.exists(IDB_PID_PATH):
with open(IDB_PID_PATH, "wb", buffering=0) as pid_file:
pid_file.flush()
async def kill_saved_pids() -> None:
if not _has_saved_pids():
logging.debug(f"no daemon pid found")
return
for pid in _get_pids():
try:
os.kill(pid, signal.SIGTERM)
logging.info(f"stopped daemon with pid {pid}")
        except (OSError, ProcessLookupError):
pass
_clear_saved_pids()
| true | true |
f71f5d1fb7366ad5808529f520d04d12bd1805b1 | 12,476 | py | Python | bot/bot.py | mudkipdev/pydis-bot | 234fba49e039fc4c5c8421162e803b1be3d0d33c | [
"MIT",
"BSD-3-Clause"
] | null | null | null | bot/bot.py | mudkipdev/pydis-bot | 234fba49e039fc4c5c8421162e803b1be3d0d33c | [
"MIT",
"BSD-3-Clause"
] | null | null | null | bot/bot.py | mudkipdev/pydis-bot | 234fba49e039fc4c5c8421162e803b1be3d0d33c | [
"MIT",
"BSD-3-Clause"
] | null | null | null | import asyncio
import logging
import socket
import warnings
from collections import defaultdict
from typing import Dict, Optional
import aiohttp
import discord
from async_rediscache import RedisSession
from discord.ext import commands
from sentry_sdk import push_scope
from bot import api, constants
from bot.async_stats import AsyncStatsClient
log = logging.getLogger('bot')
LOCALHOST = "127.0.0.1"
class Bot(commands.Bot):
"""A subclass of `discord.ext.commands.Bot` with an aiohttp session and an API client."""
def __init__(self, *args, redis_session: RedisSession, **kwargs):
if "connector" in kwargs:
warnings.warn(
"If login() is called (or the bot is started), the connector will be overwritten "
"with an internal one"
)
super().__init__(*args, **kwargs)
self.http_session: Optional[aiohttp.ClientSession] = None
self.redis_session = redis_session
self.api_client = api.APIClient(loop=self.loop)
self.filter_list_cache = defaultdict(dict)
self._connector = None
self._resolver = None
self._statsd_timerhandle: asyncio.TimerHandle = None
self._guild_available = asyncio.Event()
statsd_url = constants.Stats.statsd_host
if constants.DEBUG_MODE:
# Since statsd is UDP, there are no errors for sending to a down port.
# For this reason, setting the statsd host to 127.0.0.1 for development
# will effectively disable stats.
statsd_url = LOCALHOST
self.stats = AsyncStatsClient(self.loop, LOCALHOST)
self._connect_statsd(statsd_url)
def _connect_statsd(self, statsd_url: str, retry_after: int = 2, attempt: int = 1) -> None:
"""Callback used to retry a connection to statsd if it should fail."""
if attempt >= 8:
log.error("Reached 8 attempts trying to reconnect AsyncStatsClient. Aborting")
return
try:
self.stats = AsyncStatsClient(self.loop, statsd_url, 8125, prefix="bot")
except socket.gaierror:
log.warning(f"Statsd client failed to connect (Attempt(s): {attempt})")
# Use a fallback strategy for retrying, up to 8 times.
self._statsd_timerhandle = self.loop.call_later(
retry_after,
self._connect_statsd,
statsd_url,
retry_after * 2,
attempt + 1
)
async def cache_filter_list_data(self) -> None:
"""Cache all the data in the FilterList on the site."""
full_cache = await self.api_client.get('bot/filter-lists')
for item in full_cache:
self.insert_item_into_filter_list_cache(item)
def _recreate(self) -> None:
"""Re-create the connector, aiohttp session, the APIClient and the Redis session."""
# Use asyncio for DNS resolution instead of threads so threads aren't spammed.
# Doesn't seem to have any state with regards to being closed, so no need to worry?
self._resolver = aiohttp.AsyncResolver()
# Its __del__ does send a warning but it doesn't always show up for some reason.
if self._connector and not self._connector._closed:
log.warning(
"The previous connector was not closed; it will remain open and be overwritten"
)
if self.redis_session.closed:
# If the RedisSession was somehow closed, we try to reconnect it
# here. Normally, this shouldn't happen.
self.loop.create_task(self.redis_session.connect())
# Use AF_INET as its socket family to prevent HTTPS related problems both locally
# and in production.
self._connector = aiohttp.TCPConnector(
resolver=self._resolver,
family=socket.AF_INET,
)
# Client.login() will call HTTPClient.static_login() which will create a session using
# this connector attribute.
self.http.connector = self._connector
# Its __del__ does send a warning but it doesn't always show up for some reason.
if self.http_session and not self.http_session.closed:
log.warning(
"The previous session was not closed; it will remain open and be overwritten"
)
self.http_session = aiohttp.ClientSession(connector=self._connector)
self.api_client.recreate(force=True, connector=self._connector)
# Build the FilterList cache
self.loop.create_task(self.cache_filter_list_data())
@classmethod
def create(cls) -> "Bot":
"""Create and return an instance of a Bot."""
loop = asyncio.get_event_loop()
allowed_roles = [discord.Object(id_) for id_ in constants.MODERATION_ROLES]
intents = discord.Intents().all()
intents.presences = False
intents.dm_typing = False
intents.dm_reactions = False
intents.invites = False
intents.webhooks = False
intents.integrations = False
return cls(
redis_session=_create_redis_session(loop),
loop=loop,
command_prefix=commands.when_mentioned_or(constants.Bot.prefix),
activity=discord.Game(name=f"Commands: {constants.Bot.prefix}help"),
case_insensitive=True,
max_messages=10_000,
allowed_mentions=discord.AllowedMentions(everyone=False, roles=allowed_roles),
intents=intents,
)
def load_extensions(self) -> None:
"""Load all enabled extensions."""
# Must be done here to avoid a circular import.
from bot.utils.extensions import EXTENSIONS
extensions = set(EXTENSIONS) # Create a mutable copy.
if not constants.HelpChannels.enable:
extensions.remove("bot.exts.help_channels")
for extension in extensions:
self.load_extension(extension)
def add_cog(self, cog: commands.Cog) -> None:
"""Adds a "cog" to the bot and logs the operation."""
super().add_cog(cog)
log.info(f"Cog loaded: {cog.qualified_name}")
def add_command(self, command: commands.Command) -> None:
"""Add `command` as normal and then add its root aliases to the bot."""
super().add_command(command)
self._add_root_aliases(command)
def remove_command(self, name: str) -> Optional[commands.Command]:
"""
Remove a command/alias as normal and then remove its root aliases from the bot.
Individual root aliases cannot be removed by this function.
To remove them, either remove the entire command or manually edit `bot.all_commands`.
"""
command = super().remove_command(name)
if command is None:
# Even if it's a root alias, there's no way to get the Bot instance to remove the alias.
return
self._remove_root_aliases(command)
return command
def clear(self) -> None:
"""
Clears the internal state of the bot and recreates the connector and sessions.
Will cause a DeprecationWarning if called outside a coroutine.
"""
# Because discord.py recreates the HTTPClient session, may as well follow suit and recreate
# our own stuff here too.
self._recreate()
super().clear()
async def close(self) -> None:
"""Close the Discord connection and the aiohttp session, connector, statsd client, and resolver."""
await super().close()
await self.api_client.close()
if self.http_session:
await self.http_session.close()
if self._connector:
await self._connector.close()
if self._resolver:
await self._resolver.close()
if self.stats._transport:
self.stats._transport.close()
if self.redis_session:
await self.redis_session.close()
if self._statsd_timerhandle:
self._statsd_timerhandle.cancel()
def insert_item_into_filter_list_cache(self, item: Dict[str, str]) -> None:
"""Add an item to the bots filter_list_cache."""
type_ = item["type"]
allowed = item["allowed"]
content = item["content"]
self.filter_list_cache[f"{type_}.{allowed}"][content] = {
"id": item["id"],
"comment": item["comment"],
"created_at": item["created_at"],
"updated_at": item["updated_at"],
}
async def login(self, *args, **kwargs) -> None:
"""Re-create the connector and set up sessions before logging into Discord."""
self._recreate()
await self.stats.create_socket()
await super().login(*args, **kwargs)
async def on_guild_available(self, guild: discord.Guild) -> None:
"""
Set the internal guild available event when constants.Guild.id becomes available.
If the cache appears to still be empty (no members, no channels, or no roles), the event
will not be set.
"""
if guild.id != constants.Guild.id:
return
if not guild.roles or not guild.members or not guild.channels:
msg = "Guild available event was dispatched but the cache appears to still be empty!"
log.warning(msg)
try:
webhook = await self.fetch_webhook(constants.Webhooks.dev_log)
except discord.HTTPException as e:
log.error(f"Failed to fetch webhook to send empty cache warning: status {e.status}")
else:
await webhook.send(f"<@&{constants.Roles.admin}> {msg}")
return
self._guild_available.set()
async def on_guild_unavailable(self, guild: discord.Guild) -> None:
"""Clear the internal guild available event when constants.Guild.id becomes unavailable."""
if guild.id != constants.Guild.id:
return
self._guild_available.clear()
async def wait_until_guild_available(self) -> None:
"""
Wait until the constants.Guild.id guild is available (and the cache is ready).
The on_ready event is inadequate because it only waits 2 seconds for a GUILD_CREATE
gateway event before giving up and thus not populating the cache for unavailable guilds.
"""
await self._guild_available.wait()
async def on_error(self, event: str, *args, **kwargs) -> None:
"""Log errors raised in event listeners rather than printing them to stderr."""
self.stats.incr(f"errors.event.{event}")
with push_scope() as scope:
scope.set_tag("event", event)
scope.set_extra("args", args)
scope.set_extra("kwargs", kwargs)
log.exception(f"Unhandled exception in {event}.")
def _add_root_aliases(self, command: commands.Command) -> None:
"""Recursively add root aliases for `command` and any of its subcommands."""
if isinstance(command, commands.Group):
for subcommand in command.commands:
self._add_root_aliases(subcommand)
for alias in getattr(command, "root_aliases", ()):
if alias in self.all_commands:
raise commands.CommandRegistrationError(alias, alias_conflict=True)
self.all_commands[alias] = command
def _remove_root_aliases(self, command: commands.Command) -> None:
"""Recursively remove root aliases for `command` and any of its subcommands."""
if isinstance(command, commands.Group):
for subcommand in command.commands:
self._remove_root_aliases(subcommand)
for alias in getattr(command, "root_aliases", ()):
self.all_commands.pop(alias, None)
def _create_redis_session(loop: asyncio.AbstractEventLoop) -> RedisSession:
"""
Create and connect to a redis session.
Ensure the connection is established before returning to prevent race conditions.
`loop` is the event loop on which to connect. The Bot should use this same event loop.
"""
redis_session = RedisSession(
address=(constants.Redis.host, constants.Redis.port),
password=constants.Redis.password,
minsize=1,
maxsize=20,
use_fakeredis=constants.Redis.use_fakeredis,
global_namespace="bot",
)
loop.run_until_complete(redis_session.connect())
return redis_session
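# Editor's sketch of how this class is typically wired up from a launcher module;
# the real entry point is not shown here, so treat the exact calls (and the use of
# ``constants.Bot.token``) as assumptions rather than the project's startup code.
def _run_bot_example() -> None:
    bot = Bot.create()
    bot.load_extensions()
    bot.run(constants.Bot.token)  # blocks until the bot shuts down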
| 38.152905 | 107 | 0.64083 | import asyncio
import logging
import socket
import warnings
from collections import defaultdict
from typing import Dict, Optional
import aiohttp
import discord
from async_rediscache import RedisSession
from discord.ext import commands
from sentry_sdk import push_scope
from bot import api, constants
from bot.async_stats import AsyncStatsClient
log = logging.getLogger('bot')
LOCALHOST = "127.0.0.1"
class Bot(commands.Bot):
def __init__(self, *args, redis_session: RedisSession, **kwargs):
if "connector" in kwargs:
warnings.warn(
"If login() is called (or the bot is started), the connector will be overwritten "
"with an internal one"
)
super().__init__(*args, **kwargs)
self.http_session: Optional[aiohttp.ClientSession] = None
self.redis_session = redis_session
self.api_client = api.APIClient(loop=self.loop)
self.filter_list_cache = defaultdict(dict)
self._connector = None
self._resolver = None
self._statsd_timerhandle: asyncio.TimerHandle = None
self._guild_available = asyncio.Event()
statsd_url = constants.Stats.statsd_host
if constants.DEBUG_MODE:
statsd_url = LOCALHOST
self.stats = AsyncStatsClient(self.loop, LOCALHOST)
self._connect_statsd(statsd_url)
def _connect_statsd(self, statsd_url: str, retry_after: int = 2, attempt: int = 1) -> None:
if attempt >= 8:
log.error("Reached 8 attempts trying to reconnect AsyncStatsClient. Aborting")
return
try:
self.stats = AsyncStatsClient(self.loop, statsd_url, 8125, prefix="bot")
except socket.gaierror:
log.warning(f"Statsd client failed to connect (Attempt(s): {attempt})")
self._statsd_timerhandle = self.loop.call_later(
retry_after,
self._connect_statsd,
statsd_url,
retry_after * 2,
attempt + 1
)
async def cache_filter_list_data(self) -> None:
full_cache = await self.api_client.get('bot/filter-lists')
for item in full_cache:
self.insert_item_into_filter_list_cache(item)
def _recreate(self) -> None:
# Doesn't seem to have any state with regards to being closed, so no need to worry?
self._resolver = aiohttp.AsyncResolver()
if self._connector and not self._connector._closed:
log.warning(
"The previous connector was not closed; it will remain open and be overwritten"
)
if self.redis_session.closed:
# If the RedisSession was somehow closed, we try to reconnect it
# here. Normally, this shouldn't happen.
self.loop.create_task(self.redis_session.connect())
self._connector = aiohttp.TCPConnector(
resolver=self._resolver,
family=socket.AF_INET,
)
self.http.connector = self._connector
if self.http_session and not self.http_session.closed:
log.warning(
"The previous session was not closed; it will remain open and be overwritten"
)
self.http_session = aiohttp.ClientSession(connector=self._connector)
self.api_client.recreate(force=True, connector=self._connector)
# Build the FilterList cache
self.loop.create_task(self.cache_filter_list_data())
@classmethod
def create(cls) -> "Bot":
loop = asyncio.get_event_loop()
allowed_roles = [discord.Object(id_) for id_ in constants.MODERATION_ROLES]
intents = discord.Intents().all()
intents.presences = False
intents.dm_typing = False
intents.dm_reactions = False
intents.invites = False
intents.webhooks = False
intents.integrations = False
return cls(
redis_session=_create_redis_session(loop),
loop=loop,
command_prefix=commands.when_mentioned_or(constants.Bot.prefix),
activity=discord.Game(name=f"Commands: {constants.Bot.prefix}help"),
case_insensitive=True,
max_messages=10_000,
allowed_mentions=discord.AllowedMentions(everyone=False, roles=allowed_roles),
intents=intents,
)
def load_extensions(self) -> None:
# Must be done here to avoid a circular import.
from bot.utils.extensions import EXTENSIONS
extensions = set(EXTENSIONS) # Create a mutable copy.
if not constants.HelpChannels.enable:
extensions.remove("bot.exts.help_channels")
for extension in extensions:
self.load_extension(extension)
def add_cog(self, cog: commands.Cog) -> None:
super().add_cog(cog)
log.info(f"Cog loaded: {cog.qualified_name}")
def add_command(self, command: commands.Command) -> None:
super().add_command(command)
self._add_root_aliases(command)
def remove_command(self, name: str) -> Optional[commands.Command]:
command = super().remove_command(name)
if command is None:
# Even if it's a root alias, there's no way to get the Bot instance to remove the alias.
return
self._remove_root_aliases(command)
return command
def clear(self) -> None:
# Because discord.py recreates the HTTPClient session, may as well follow suit and recreate
# our own stuff here too.
self._recreate()
super().clear()
async def close(self) -> None:
await super().close()
await self.api_client.close()
if self.http_session:
await self.http_session.close()
if self._connector:
await self._connector.close()
if self._resolver:
await self._resolver.close()
if self.stats._transport:
self.stats._transport.close()
if self.redis_session:
await self.redis_session.close()
if self._statsd_timerhandle:
self._statsd_timerhandle.cancel()
def insert_item_into_filter_list_cache(self, item: Dict[str, str]) -> None:
type_ = item["type"]
allowed = item["allowed"]
content = item["content"]
self.filter_list_cache[f"{type_}.{allowed}"][content] = {
"id": item["id"],
"comment": item["comment"],
"created_at": item["created_at"],
"updated_at": item["updated_at"],
}
async def login(self, *args, **kwargs) -> None:
self._recreate()
await self.stats.create_socket()
await super().login(*args, **kwargs)
async def on_guild_available(self, guild: discord.Guild) -> None:
if guild.id != constants.Guild.id:
return
if not guild.roles or not guild.members or not guild.channels:
msg = "Guild available event was dispatched but the cache appears to still be empty!"
log.warning(msg)
try:
webhook = await self.fetch_webhook(constants.Webhooks.dev_log)
except discord.HTTPException as e:
log.error(f"Failed to fetch webhook to send empty cache warning: status {e.status}")
else:
await webhook.send(f"<@&{constants.Roles.admin}> {msg}")
return
self._guild_available.set()
async def on_guild_unavailable(self, guild: discord.Guild) -> None:
if guild.id != constants.Guild.id:
return
self._guild_available.clear()
async def wait_until_guild_available(self) -> None:
await self._guild_available.wait()
async def on_error(self, event: str, *args, **kwargs) -> None:
self.stats.incr(f"errors.event.{event}")
with push_scope() as scope:
scope.set_tag("event", event)
scope.set_extra("args", args)
scope.set_extra("kwargs", kwargs)
log.exception(f"Unhandled exception in {event}.")
def _add_root_aliases(self, command: commands.Command) -> None:
if isinstance(command, commands.Group):
for subcommand in command.commands:
self._add_root_aliases(subcommand)
for alias in getattr(command, "root_aliases", ()):
if alias in self.all_commands:
raise commands.CommandRegistrationError(alias, alias_conflict=True)
self.all_commands[alias] = command
def _remove_root_aliases(self, command: commands.Command) -> None:
if isinstance(command, commands.Group):
for subcommand in command.commands:
self._remove_root_aliases(subcommand)
for alias in getattr(command, "root_aliases", ()):
self.all_commands.pop(alias, None)
def _create_redis_session(loop: asyncio.AbstractEventLoop) -> RedisSession:
redis_session = RedisSession(
address=(constants.Redis.host, constants.Redis.port),
password=constants.Redis.password,
minsize=1,
maxsize=20,
use_fakeredis=constants.Redis.use_fakeredis,
global_namespace="bot",
)
loop.run_until_complete(redis_session.connect())
return redis_session
| true | true |
f71f5dc2484d87171414c6d905bc5a1656c3625b | 4,043 | py | Python | encoders/audio/Wav2VecSpeechEncoder/__init__.py | akurniawan/jina-hub | d89bc5e8f527f1212c3228a15775e222983c0087 | [
"Apache-2.0"
] | null | null | null | encoders/audio/Wav2VecSpeechEncoder/__init__.py | akurniawan/jina-hub | d89bc5e8f527f1212c3228a15775e222983c0087 | [
"Apache-2.0"
] | null | null | null | encoders/audio/Wav2VecSpeechEncoder/__init__.py | akurniawan/jina-hub | d89bc5e8f527f1212c3228a15775e222983c0087 | [
"Apache-2.0"
] | null | null | null | __copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from typing import Optional
import numpy as np
from jina.executors.decorators import batching, as_ndarray
from jina.executors.encoders import BaseAudioEncoder
from jina.executors.encoders.frameworks import BaseTorchEncoder
from jina.excepts import PretrainedModelFileDoesNotExist
from jina.helper import cached_property
class Wav2VecSpeechEncoder(BaseTorchEncoder, BaseAudioEncoder):
"""
Use a pre-trained model (`wav2vec`) to encode audio signal.
:class:`Wav2VecSpeechEncoder` is a speech encoder based on `wav2vec`,
an unsupervised pre-trained model for speech recognition presented and implemented
by Facebook: https://github.com/pytorch/fairseq/tree/master/examples/wav2vec
It uses a pre-trained model to encode an audio signal from
a `Batch x Signal Length` ndarray into a `Batch x Concatenated Features` ndarray,
and produces a representation for each time step at a rate of 100 Hz.
:param model_path: the path of the pre-trained model.
The pre-trained model can be downloaded at
https://github.com/pytorch/fairseq/tree/master/examples/wav2vec/README.md#wav2vec
:param input_sample_rate: input sampling rate in Hz (22050 by default)
"""
def __init__(self,
model_path: Optional[str] = '/tmp/wav2vec_large.pt',
input_sample_rate: int = 22050,
*args,
**kwargs):
"""Set Constructor"""
super().__init__(*args, **kwargs)
self.model_path = model_path
self.input_sample_rate = input_sample_rate
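    # A minimal usage sketch, assuming the pre-trained checkpoint already
    # exists at model_path and the encoder is driven directly (outside a Flow);
    # the 2 x 22050 input below is a hypothetical batch of two 1-second clips:
    #
    #     encoder = Wav2VecSpeechEncoder(model_path='/tmp/wav2vec_large.pt',
    #                                    input_sample_rate=22050)
    #     encoder.post_init()                        # loads the fairseq model
    #     signals = np.random.randn(2, 22050).astype(np.float32)
    #     embeddings = encoder.encode(signals)       # one embedding per signal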
def post_init(self):
super().post_init()
if self.model_path and os.path.exists(self.model_path):
import torch
from fairseq.models.wav2vec import Wav2VecModel
cp = torch.load(self.model_path, map_location=torch.device('cpu'))
self.model = Wav2VecModel.build_model(cp['args'], task=None)
self.model.load_state_dict(cp['model'])
self.model.eval()
self.to_device(self.model)
self._tensor_func = torch.tensor
else:
raise PretrainedModelFileDoesNotExist(f'model at {self.model_path} does not exist')
@batching
@as_ndarray
def encode(self, data: np.ndarray, *args, **kwargs) -> np.ndarray:
"""
Resample input audio signal to 16kHz.
Segments the resampled signal of each Doc into `wav2vec` frames,
encodes the frames and concatenates Doc frame embeddings into a
single Doc embedding.
        :param data: A `Batch x Signal Length` ndarray, where
`Signal Length` is a number of samples
:return: A `Batch x Concatenated Features` ndarray,
where `Concatenated Features` is a 512-dimensional feature
vector times the number of the wav2vec frames.
"""
assert data.shape[1] >= 465, 'the signal must have at least 465 samples'
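        # Rough size illustration, assuming the defaults above: a 1-second clip
        # at 22050 Hz resamples to ~16000 samples, and wav2vec emits one 512-d
        # frame at ~100 Hz, so the concatenated embedding has length ~512 * 100.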
from librosa import resample
embeds = []
with self.session():
for chunk_data in data:
resampled_signal = resample(chunk_data, self.input_sample_rate, 16000)
signal_tensor = self.array2tensor(resampled_signal.reshape(1, -1))
features = self.model.feature_extractor(signal_tensor)
embed_tensor = self.model.feature_aggregator(features)[0]
chunk_embed = self.tensor2array(embed_tensor).T.flatten()
embeds.append(chunk_embed)
return embeds
def array2tensor(self, array):
tensor = self._tensor_func(array)
return tensor.cuda() if self.on_gpu else tensor
def tensor2array(self, tensor):
        return tensor.cpu().numpy() if self.on_gpu else tensor.numpy()
@cached_property
def session(self):
return self.get_session()
def get_session(self):
from torch import no_grad
return no_grad | 40.838384 | 95 | 0.666337 | __copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import os
from typing import Optional
import numpy as np
from jina.executors.decorators import batching, as_ndarray
from jina.executors.encoders import BaseAudioEncoder
from jina.executors.encoders.frameworks import BaseTorchEncoder
from jina.excepts import PretrainedModelFileDoesNotExist
from jina.helper import cached_property
class Wav2VecSpeechEncoder(BaseTorchEncoder, BaseAudioEncoder):
def __init__(self,
model_path: Optional[str] = '/tmp/wav2vec_large.pt',
input_sample_rate: int = 22050,
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.model_path = model_path
self.input_sample_rate = input_sample_rate
def post_init(self):
super().post_init()
if self.model_path and os.path.exists(self.model_path):
import torch
from fairseq.models.wav2vec import Wav2VecModel
cp = torch.load(self.model_path, map_location=torch.device('cpu'))
self.model = Wav2VecModel.build_model(cp['args'], task=None)
self.model.load_state_dict(cp['model'])
self.model.eval()
self.to_device(self.model)
self._tensor_func = torch.tensor
else:
raise PretrainedModelFileDoesNotExist(f'model at {self.model_path} does not exist')
@batching
@as_ndarray
def encode(self, data: np.ndarray, *args, **kwargs) -> np.ndarray:
assert data.shape[1] >= 465, 'the signal must have at least 465 samples'
from librosa import resample
embeds = []
with self.session():
for chunk_data in data:
resampled_signal = resample(chunk_data, self.input_sample_rate, 16000)
signal_tensor = self.array2tensor(resampled_signal.reshape(1, -1))
features = self.model.feature_extractor(signal_tensor)
embed_tensor = self.model.feature_aggregator(features)[0]
chunk_embed = self.tensor2array(embed_tensor).T.flatten()
embeds.append(chunk_embed)
return embeds
def array2tensor(self, array):
tensor = self._tensor_func(array)
return tensor.cuda() if self.on_gpu else tensor
def tensor2array(self, tensor):
        return tensor.cpu().numpy() if self.on_gpu else tensor.numpy()
@cached_property
def session(self):
return self.get_session()
def get_session(self):
from torch import no_grad
return no_grad | true | true |
f71f5e67663079678fe379004ccba2d635f29cd6 | 3,572 | py | Python | cp_spider/cp_spider/settings.py | zachariah-chow/mas-cp-scrapy | 7c3cd8bcb9d6fc248a325621337da40398452cdb | [
"MIT"
] | null | null | null | cp_spider/cp_spider/settings.py | zachariah-chow/mas-cp-scrapy | 7c3cd8bcb9d6fc248a325621337da40398452cdb | [
"MIT"
] | null | null | null | cp_spider/cp_spider/settings.py | zachariah-chow/mas-cp-scrapy | 7c3cd8bcb9d6fc248a325621337da40398452cdb | [
"MIT"
] | null | null | null | # Scrapy settings for cp_spider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'cp_spider'
SPIDER_MODULES = ['cp_spider.spiders']
NEWSPIDER_MODULE = 'cp_spider.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'cp_spider (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
# }
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'cp_spider.middlewares.CpSpiderSpiderMiddleware': 543,
# }
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# 'cp_spider.middlewares.CpSpiderDownloaderMiddleware': 543,
# }
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
# }
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# ITEM_PIPELINES = {
# 'cp_spider.pipelines.CpSpiderPipeline': 300,
# }
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# Scrapy Splash Settings
SPLASH_URL = 'http://localhost:8050'
DOWNLOADER_MIDDLEWARES = {
'scrapy_splash.SplashCookiesMiddleware': 723,
'scrapy_splash.SplashMiddleware': 725,
'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
}
SPIDER_MIDDLEWARES = {
'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
}
DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'
HTTPCACHE_STORAGE = 'scrapy_splash.SplashAwareFSCacheStorage'
| 33.698113 | 103 | 0.779395 |
BOT_NAME = 'cp_spider'
SPIDER_MODULES = ['cp_spider.spiders']
NEWSPIDER_MODULE = 'cp_spider.spiders'
ROBOTSTXT_OBEY = False
SPLASH_URL = 'http://localhost:8050'
DOWNLOADER_MIDDLEWARES = {
'scrapy_splash.SplashCookiesMiddleware': 723,
'scrapy_splash.SplashMiddleware': 725,
'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
}
SPIDER_MIDDLEWARES = {
'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
}
DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'
HTTPCACHE_STORAGE = 'scrapy_splash.SplashAwareFSCacheStorage'
| true | true |
f71f5e75839cd04c172644fd22b384312e83d690 | 6,499 | py | Python | grid_world.py | vigneshyaadav27/Grid-world | a5c4cab46cdafc6458526593ae31ac19a152001d | [
"MIT"
] | null | null | null | grid_world.py | vigneshyaadav27/Grid-world | a5c4cab46cdafc6458526593ae31ac19a152001d | [
"MIT"
] | null | null | null | grid_world.py | vigneshyaadav27/Grid-world | a5c4cab46cdafc6458526593ae31ac19a152001d | [
"MIT"
] | null | null | null | #######################################################################
# Copyright (C) #
# 2016-2018 Shangtong Zhang(zhangshangtong.cpp@gmail.com) #
# 2016 Kenta Shimada(hyperkentakun@gmail.com) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.table import Table
matplotlib.use('Agg')
WORLD_SIZE = 5
A_POS = [0, 1]
A_PRIME_POS = [4, 1]
B_POS = [0, 3]
B_PRIME_POS = [2, 3]
DISCOUNT = 0.9
# left, up, right, down
ACTIONS = [np.array([0, -1]),
np.array([-1, 0]),
np.array([0, 1]),
np.array([1, 0])]
ACTIONS_FIGS=[ '←', '↑', '→', '↓']
ACTION_PROB = 0.25
def step(state, action):
if state == A_POS:
return A_PRIME_POS, 10
if state == B_POS:
return B_PRIME_POS, 5
next_state = (np.array(state) + action).tolist()
x, y = next_state
if x < 0 or x >= WORLD_SIZE or y < 0 or y >= WORLD_SIZE:
reward = -1.0
next_state = state
else:
reward = 0
return next_state, reward
def draw_image(image):
fig, ax = plt.subplots()
ax.set_axis_off()
tb = Table(ax, bbox=[0, 0, 1, 1])
nrows, ncols = image.shape
width, height = 1.0 / ncols, 1.0 / nrows
# Add cells
for (i, j), val in np.ndenumerate(image):
# add state labels
if [i, j] == A_POS:
val = str(val) + " (A)"
if [i, j] == A_PRIME_POS:
val = str(val) + " (A')"
if [i, j] == B_POS:
val = str(val) + " (B)"
if [i, j] == B_PRIME_POS:
val = str(val) + " (B')"
tb.add_cell(i, j, width, height, text=val,
loc='center', facecolor='white')
# Row and column labels...
for i in range(len(image)):
tb.add_cell(i, -1, width, height, text=i+1, loc='right',
edgecolor='none', facecolor='none')
tb.add_cell(-1, i, width, height/2, text=i+1, loc='center',
edgecolor='none', facecolor='none')
ax.add_table(tb)
def draw_policy(optimal_values):
fig, ax = plt.subplots()
ax.set_axis_off()
tb = Table(ax, bbox=[0, 0, 1, 1])
nrows, ncols = optimal_values.shape
width, height = 1.0 / ncols, 1.0 / nrows
# Add cells
for (i, j), val in np.ndenumerate(optimal_values):
next_vals=[]
for action in ACTIONS:
next_state, _ = step([i, j], action)
next_vals.append(optimal_values[next_state[0],next_state[1]])
best_actions=np.where(next_vals == np.max(next_vals))[0]
val=''
for ba in best_actions:
val+=ACTIONS_FIGS[ba]
# add state labels
if [i, j] == A_POS:
val = str(val) + " (A)"
if [i, j] == A_PRIME_POS:
val = str(val) + " (A')"
if [i, j] == B_POS:
val = str(val) + " (B)"
if [i, j] == B_PRIME_POS:
val = str(val) + " (B')"
tb.add_cell(i, j, width, height, text=val,
loc='center', facecolor='white')
# Row and column labels...
for i in range(len(optimal_values)):
tb.add_cell(i, -1, width, height, text=i+1, loc='right',
edgecolor='none', facecolor='none')
tb.add_cell(-1, i, width, height/2, text=i+1, loc='center',
edgecolor='none', facecolor='none')
ax.add_table(tb)
def figure_3_2():
value = np.zeros((WORLD_SIZE, WORLD_SIZE))
while True:
# keep iteration until convergence
new_value = np.zeros_like(value)
for i in range(WORLD_SIZE):
for j in range(WORLD_SIZE):
for action in ACTIONS:
(next_i, next_j), reward = step([i, j], action)
# bellman equation
new_value[i, j] += ACTION_PROB * (reward + DISCOUNT * value[next_i, next_j])
if np.sum(np.abs(value - new_value)) < 1e-4:
draw_image(np.round(new_value, decimals=2))
plt.savefig('../images/figure_3_2.png')
plt.close()
break
value = new_value
def figure_3_2_linear_system():
'''
Here we solve the linear system of equations to find the exact solution.
We do this by filling the coefficients for each of the states with their respective right side constant.
'''
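    # Sketch of the system being assembled, assuming the equiprobable policy
    # used above: every state s satisfies
    #     v(s) = sum_a pi(a|s) * (r(s, a) + DISCOUNT * v(s'))
    # Moving the value terms to one side gives
    #     -v(s) + sum_a pi(a|s) * DISCOUNT * v(s') = -sum_a pi(a|s) * r(s, a)
    # which is exactly the row of A (left side) and entry of b (right side)
    # filled in below and then solved with np.linalg.solve.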
A = -1 * np.eye(WORLD_SIZE * WORLD_SIZE)
b = np.zeros(WORLD_SIZE * WORLD_SIZE)
for i in range(WORLD_SIZE):
for j in range(WORLD_SIZE):
s = [i, j] # current state
index_s = np.ravel_multi_index(s, (WORLD_SIZE, WORLD_SIZE))
for a in ACTIONS:
s_, r = step(s, a)
index_s_ = np.ravel_multi_index(s_, (WORLD_SIZE, WORLD_SIZE))
A[index_s, index_s_] += ACTION_PROB * DISCOUNT
b[index_s] -= ACTION_PROB * r
x = np.linalg.solve(A, b)
draw_image(np.round(x.reshape(WORLD_SIZE, WORLD_SIZE), decimals=2))
plt.savefig('../images/figure_3_2_linear_system.png')
plt.close()
def figure_3_5():
value = np.zeros((WORLD_SIZE, WORLD_SIZE))
while True:
# keep iteration until convergence
new_value = np.zeros_like(value)
for i in range(WORLD_SIZE):
for j in range(WORLD_SIZE):
values = []
for action in ACTIONS:
(next_i, next_j), reward = step([i, j], action)
# value iteration
values.append(reward + DISCOUNT * value[next_i, next_j])
new_value[i, j] = np.max(values)
if np.sum(np.abs(new_value - value)) < 1e-4:
draw_image(np.round(new_value, decimals=2))
plt.savefig('../images/figure_3_5.png')
plt.close()
draw_policy(new_value)
plt.savefig('../images/figure_3_5_policy.png')
plt.close()
break
value = new_value
if __name__ == '__main__':
figure_3_2_linear_system()
figure_3_2()
figure_3_5()
| 33.158163 | 109 | 0.507617 | if np.sum(np.abs(new_value - value)) < 1e-4:
draw_image(np.round(new_value, decimals=2))
plt.savefig('../images/figure_3_5.png')
plt.close()
draw_policy(new_value)
plt.savefig('../images/figure_3_5_policy.png')
plt.close()
break
value = new_value
if __name__ == '__main__':
figure_3_2_linear_system()
figure_3_2()
figure_3_5()
| true | true |
f71f5f797ad336b6fedd52f0f7c38c754c946db7 | 245 | py | Python | mundo 2/aula 12/exer38.py | jonatan098/cursopython | 6e4cbaef6229e230fdbc66d80ec1b5a089887b0d | [
"MIT"
] | null | null | null | mundo 2/aula 12/exer38.py | jonatan098/cursopython | 6e4cbaef6229e230fdbc66d80ec1b5a089887b0d | [
"MIT"
] | null | null | null | mundo 2/aula 12/exer38.py | jonatan098/cursopython | 6e4cbaef6229e230fdbc66d80ec1b5a089887b0d | [
"MIT"
] | 1 | 2020-02-22T17:21:05.000Z | 2020-02-22T17:21:05.000Z | num1 = int(input('digite o primeiro valor: '))
num2 = int(input('digite o segundo valor: '))
if num1 > num2:
print('o primeiro numero e maior')
elif num2 > num1:
print('o segundo numero e maior')
else:
print('os numeros são iguais')
| 27.222222 | 46 | 0.665306 | num1 = int(input('digite o primeiro valor: '))
num2 = int(input('digite o segundo valor: '))
if num1 > num2:
print('o primeiro numero e maior')
elif num2 > num1:
print('o segundo numero e maior')
else:
print('os numeros são iguais')
| true | true |