# Dataset-extraction artifact (markdown table header), commented out so the
# file remains importable as Python:
# code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
# |---|---|---|
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
qcut,
)
import pandas._testing as tm
def cartesian_product_for_groupers(result, args, names, fill_value=np.nan):
    """Reindex to a cartesian product for the groupers,
    preserving the nature (Categorical) of each grouper.

    Parameters
    ----------
    result : DataFrame or Series
        Aggregation result indexed by the observed group combinations.
    args : sequence
        The groupers; Categorical/CategoricalIndex entries are expanded to
        cover *all* of their categories, not only the observed codes.
    names : sequence of str
        Level names for the product index.
    fill_value : scalar, default np.nan
        Value used for combinations absent from ``result``.
    """

    def f(a):
        if isinstance(a, (CategoricalIndex, Categorical)):
            categories = a.categories
            # Rebuild with one code per category so unobserved categories
            # appear in the product as well.
            a = Categorical.from_codes(
                np.arange(len(categories)), categories=categories, ordered=a.ordered
            )
        return a

    index = MultiIndex.from_product(map(f, args), names=names)
    return result.reindex(index, fill_value=fill_value).sort_index()
_results_for_groupbys_with_missing_categories = {
# This maps the builtin groupby functions to their expected outputs for
# missing categories when they are called on a categorical grouper with
# observed=False. Some functions are expected to return NaN, some zero.
# These expected values can be used across several tests (i.e. they are
# the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
# hardcoded in one place.
"all": np.NaN,
"any": np.NaN,
"count": 0,
"corrwith": np.NaN,
"first": np.NaN,
"idxmax": np.NaN,
"idxmin": np.NaN,
"last": np.NaN,
"mad": np.NaN,
"max": np.NaN,
"mean": np.NaN,
"median": np.NaN,
"min": np.NaN,
"nth": np.NaN,
"nunique": 0,
"prod": np.NaN,
"quantile": np.NaN,
"sem": np.NaN,
"size": 0,
"skew": np.NaN,
"std": np.NaN,
"sum": 0,
"var": np.NaN,
}
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {
"min": group.min(),
"max": group.max(),
"count": group.count(),
"mean": group.mean(),
}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == "C"
def test_basic():
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name)
g = x.groupby(["person_id"], observed=False)
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[["person_name"]])
result = x.drop_duplicates("person_name")
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates("person_name").iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name="person_id")
expected["person_name"] = expected["person_name"].astype("object")
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[["a"]]
)
# Filter
tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])
tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]
)
# GH 9603
df = DataFrame({"a": [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))
result = df.groupby(c, observed=False).apply(len)
exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)
expected = Series([1, 0, 0, 0], index=exp_index)
expected.index.name = "a"
tm.assert_series_equal(result, expected)
# more basic
levels = ["foo", "bar", "baz", "qux"]
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)
expected = expected.reindex(exp_idx)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
exp_cats = Categorical(
ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]
)
expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_level_get_group(observed):
    """``get_group`` on a categorical MultiIndex level selects rows by label."""
    # GH15155
    df = DataFrame(
        data=np.arange(2, 22, 2),
        index=MultiIndex(
            levels=[CategoricalIndex(["a", "b"]), range(10)],
            codes=[[0] * 5 + [1] * 5, range(10)],
            names=["Index1", "Index2"],
        ),
    )
    g = df.groupby(level=["Index1"], observed=observed)
    # expected should equal test.loc[["a"]]
    # GH15166
    expected = DataFrame(
        data=np.arange(2, 12, 2),
        index=MultiIndex(
            levels=[CategoricalIndex(["a", "b"]), range(5)],
            codes=[[0] * 5, range(5)],
            names=["Index1", "Index2"],
        ),
    )
    result = g.get_group("a")
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_apply(ordered):
    """apply/mean/agg with observed=True keep only observed combinations,
    while the per-row transform-style apply keeps the original index."""
    # GH 10138
    dense = Categorical(list("abc"), ordered=ordered)
    # 'b' is in the categories but not in the list
    missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)
    values = np.arange(len(dense))
    df = DataFrame({"missing": missing, "dense": dense, "values": values})
    grouped = df.groupby(["missing", "dense"], observed=True)
    # missing category 'b' should still exist in the output index
    idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
    expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
    # GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])
    # is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"])
    # when we expect Series(0., index=["values"])
    result = grouped.apply(lambda x: np.mean(x))
    tm.assert_frame_equal(result, expected)
    # we coerce back to ints
    expected = expected.astype("int")
    result = grouped.mean()
    tm.assert_frame_equal(result, expected)
    result = grouped.agg(np.mean)
    tm.assert_frame_equal(result, expected)
    # but for transform we should still get back the original index
    idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
    expected = Series(1, index=idx)
    result = grouped.apply(lambda x: 1)
    tm.assert_series_equal(result, expected)
def test_observed(observed):
    """End-to-end checks of the ``observed`` keyword with mixed categorical
    and non-categorical groupers; observed=False expands results to the
    cartesian product of the grouper categories."""
    # multiple groupers, don't re-expand the output space
    # of the grouper
    # gh-14942 (implement)
    # gh-10132 (back-compat)
    # gh-8138 (back-compat)
    # gh-8869
    cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
    cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
    df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
    df["C"] = ["foo", "bar"] * 2
    # multiple groupers with a non-cat
    gb = df.groupby(["A", "B", "C"], observed=observed)
    exp_index = MultiIndex.from_arrays(
        [cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"]
    )
    expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index()
    result = gb.sum()
    if not observed:
        expected = cartesian_product_for_groupers(
            expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0
        )
    tm.assert_frame_equal(result, expected)
    gb = df.groupby(["A", "B"], observed=observed)
    exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
    expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index)
    result = gb.sum()
    if not observed:
        expected = cartesian_product_for_groupers(
            expected, [cat1, cat2], list("AB"), fill_value=0
        )
    tm.assert_frame_equal(result, expected)
    # https://github.com/pandas-dev/pandas/issues/8138
    d = {
        "cat": Categorical(
            ["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True
        ),
        "ints": [1, 1, 2, 2],
        "val": [10, 20, 30, 40],
    }
    df = DataFrame(d)
    # Grouping on a single column
    groups_single_key = df.groupby("cat", observed=observed)
    result = groups_single_key.mean()
    exp_index = CategoricalIndex(
        list("ab"), name="cat", categories=list("abc"), ordered=True
    )
    expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index)
    if not observed:
        # unobserved category "c" is appended with NaN values
        index = CategoricalIndex(
            list("abc"), name="cat", categories=list("abc"), ordered=True
        )
        expected = expected.reindex(index)
    tm.assert_frame_equal(result, expected)
    # Grouping on two columns
    groups_double_key = df.groupby(["cat", "ints"], observed=observed)
    result = groups_double_key.agg("mean")
    expected = DataFrame(
        {
            "val": [10, 30, 20, 40],
            "cat": Categorical(
                ["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True
            ),
            "ints": [1, 2, 1, 2],
        }
    ).set_index(["cat", "ints"])
    if not observed:
        expected = cartesian_product_for_groupers(
            expected, [df.cat.values, [1, 2]], ["cat", "ints"]
        )
    tm.assert_frame_equal(result, expected)
    # GH 10132
    # get_group by tuple must behave the same regardless of observed
    for key in [("a", 1), ("b", 2), ("b", 1), ("a", 2)]:
        c, i = key
        result = groups_double_key.get_group(key)
        expected = df[(df.cat == c) & (df.ints == i)]
        tm.assert_frame_equal(result, expected)
    # gh-8869
    # with as_index
    d = {
        "foo": [10, 8, 4, 8, 4, 1, 1],
        "bar": [10, 20, 30, 40, 50, 60, 70],
        "baz": ["d", "c", "e", "a", "a", "d", "c"],
    }
    df = DataFrame(d)
    cat = pd.cut(df["foo"], np.linspace(0, 10, 3))
    df["range"] = cat
    groups = df.groupby(["range", "baz"], as_index=False, observed=observed)
    result = groups.agg("mean")
    groups2 = df.groupby(["range", "baz"], as_index=True, observed=observed)
    expected = groups2.agg("mean").reset_index()
    tm.assert_frame_equal(result, expected)
def test_observed_codes_remap(observed):
    """Grouping by a cut()-derived Categorical plus a column remaps codes
    correctly; observed=False expands to the full bin x value product."""
    d = {"C1": [3, 3, 4, 5], "C2": [1, 2, 3, 4], "C3": [10, 100, 200, 34]}
    df = DataFrame(d)
    values = pd.cut(df["C1"], [1, 2, 3, 6])
    values.name = "cat"
    groups_double_key = df.groupby([values, "C2"], observed=observed)
    idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=["cat", "C2"])
    expected = DataFrame({"C1": [3, 3, 4, 5], "C3": [10, 100, 200, 34]}, index=idx)
    if not observed:
        expected = cartesian_product_for_groupers(
            expected, [values.values, [1, 2, 3, 4]], ["cat", "C2"]
        )
    result = groups_double_key.agg("mean")
    tm.assert_frame_equal(result, expected)
def test_observed_perf():
    """observed=True must not expand to the (huge) cartesian product of
    high-cardinality groupers; each index level holds only observed values."""
    # we create a cartesian product, so this is
    # non-performant if we don't use observed values
    # gh-14942
    df = DataFrame(
        {
            "cat": np.random.randint(0, 255, size=30000),
            "int_id": np.random.randint(0, 255, size=30000),
            "other_id": np.random.randint(0, 10000, size=30000),
            "foo": 0,
        }
    )
    df["cat"] = df.cat.astype(str).astype("category")
    grouped = df.groupby(["cat", "int_id", "other_id"], observed=True)
    result = grouped.count()
    # each level contains exactly the values observed in the data
    assert result.index.levels[0].nunique() == df.cat.nunique()
    assert result.index.levels[1].nunique() == df.int_id.nunique()
    assert result.index.levels[2].nunique() == df.other_id.nunique()
def test_observed_groups(observed):
# gh-20583
# test that we have the appropriate groups
cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"])
df = DataFrame({"cat": cat, "vals": [1, 2, 3]})
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64"), "c": Index([1], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"c": Index([1], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_groups_with_nan(observed):
    """NaN rows are dropped from .groups; unobserved categories appear as
    empty groups only when observed=False."""
    # GH 24740
    df = DataFrame(
        {
            "cat": Categorical(["a", np.nan, "a"], categories=["a", "b", "d"]),
            "vals": [1, 2, 3],
        }
    )
    g = df.groupby("cat", observed=observed)
    result = g.groups
    if observed:
        expected = {"a": Index([0, 2], dtype="int64")}
    else:
        expected = {
            "a": Index([0, 2], dtype="int64"),
            "b": Index([], dtype="int64"),
            "d": Index([], dtype="int64"),
        }
    tm.assert_dict_equal(result, expected)
def test_observed_nth():
    """nth(0) with observed=False reindexes over all categories, giving NaN
    for categories with no rows (behavior as of the pandas version this
    test was written against)."""
    # GH 26385
    cat = Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"])
    ser = Series([1, 2, 3])
    df = DataFrame({"cat": cat, "ser": ser})
    result = df.groupby("cat", observed=False)["ser"].nth(0)
    index = Categorical(["a", "b", "c"], categories=["a", "b", "c"])
    expected = Series([1, np.nan, np.nan], index=index, name="ser")
    expected.index.name = "cat"
    tm.assert_series_equal(result, expected)
def test_dataframe_categorical_with_nan(observed):
    """NaN keys are excluded from groups; observed=False restores all
    categories in the result with NaN aggregates for the empty ones."""
    # GH 21151
    s1 = Categorical([np.nan, "a", np.nan, "a"], categories=["a", "b", "c"])
    s2 = Series([1, 2, 3, 4])
    df = DataFrame({"s1": s1, "s2": s2})
    result = df.groupby("s1", observed=observed).first().reset_index()
    if observed:
        expected = DataFrame(
            {"s1": Categorical(["a"], categories=["a", "b", "c"]), "s2": [2]}
        )
    else:
        expected = DataFrame(
            {
                "s1": Categorical(["a", "b", "c"], categories=["a", "b", "c"]),
                "s2": [2, np.nan, np.nan],
            }
        )
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
@pytest.mark.parametrize("observed", [True, False])
@pytest.mark.parametrize("sort", [True, False])
def test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort):
# GH 25871: Fix groupby sorting on ordered Categoricals
# GH 25167: Groupby with observed=True doesn't sort
# Build a dataframe with cat having one unobserved category ('missing'),
# and a Series with identical values
label = Categorical(
["d", "a", "b", "a", "d", "b"],
categories=["a", "b", "missing", "d"],
ordered=ordered,
)
val = Series(["d", "a", "b", "a", "d", "b"])
df = DataFrame({"label": label, "val": val})
# aggregate on the Categorical
result = df.groupby("label", observed=observed, sort=sort)["val"].aggregate("first")
# If ordering works, we expect index labels equal to aggregation results,
# except for 'observed=False': label 'missing' has aggregation None
label = Series(result.index.array, dtype="object")
aggr = Series(result.array)
if not observed:
aggr[aggr.isna()] = "missing"
if not all(label == aggr):
msg = (
"Labels and aggregation results not consistently sorted\n"
f"for (ordered={ordered}, observed={observed}, sort={sort})\n"
f"Result:\n{result}"
)
assert False, msg
def test_datetime():
    """Grouping by a Categorical of datetimes matches grouping by the raw
    values, with the result index preserving the categorical dtype."""
    # GH9049: ensure backward compatibility
    levels = pd.date_range("2014-01-01", periods=4)
    codes = np.random.randint(0, 4, size=100)
    cats = Categorical.from_codes(codes, levels, ordered=True)
    data = DataFrame(np.random.randn(100, 4))
    result = data.groupby(cats, observed=False).mean()
    expected = data.groupby(np.asarray(cats), observed=False).mean()
    expected = expected.reindex(levels)
    expected.index = CategoricalIndex(
        expected.index, categories=expected.index, ordered=True
    )
    tm.assert_frame_equal(result, expected)
    grouped = data.groupby(cats, observed=False)
    desc_result = grouped.describe()
    # describe on data pre-sorted by category codes must agree
    idx = cats.codes.argsort()
    ord_labels = cats.take(idx)
    ord_data = data.take(idx)
    expected = ord_data.groupby(ord_labels, observed=False).describe()
    tm.assert_frame_equal(desc_result, expected)
    tm.assert_index_equal(desc_result.index, expected.index)
    tm.assert_index_equal(
        desc_result.index.get_level_values(0), expected.index.get_level_values(0)
    )
    # GH 10460
    expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
    exp = CategoricalIndex(expc)
    tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
    exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
    tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_categorical_index():
    """Grouping by a categorical index level or column yields a
    CategoricalIndex on the result."""
    # seeded RandomState for reproducible codes
    s = np.random.RandomState(12345)
    levels = ["foo", "bar", "baz", "qux"]
    codes = s.randint(0, 4, size=20)
    cats = Categorical.from_codes(codes, levels, ordered=True)
    df = DataFrame(np.repeat(np.arange(20), 4).reshape(-1, 4), columns=list("abcd"))
    df["cats"] = cats
    # with a cat index
    result = df.set_index("cats").groupby(level=0, observed=False).sum()
    expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
    expected.index = CategoricalIndex(
        Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
    )
    tm.assert_frame_equal(result, expected)
    # with a cat column, should produce a cat index
    result = df.groupby("cats", observed=False).sum()
    expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
    expected.index = CategoricalIndex(
        Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
    )
    tm.assert_frame_equal(result, expected)
def test_describe_categorical_columns():
    """describe() on a frame with a CategoricalIndex for columns keeps the
    categorical column index (dtype and order) in the result."""
    # GH 11558
    cats = CategoricalIndex(
        ["qux", "foo", "baz", "bar"],
        categories=["foo", "bar", "baz", "qux"],
        ordered=True,
    )
    df = DataFrame(np.random.randn(20, 4), columns=cats)
    result = df.groupby([1, 2, 3, 4] * 5).describe()
    tm.assert_index_equal(result.stack().columns, cats)
    tm.assert_categorical_equal(result.stack().columns.values, cats.values)
def test_unstack_categorical():
    """unstack() of a count over a categorical level keeps a categorical
    column index, and arithmetic between the unstacked columns works."""
    # GH11558 (example is taken from the original issue)
    df = DataFrame(
        {"a": range(10), "medium": ["A", "B"] * 5, "artist": list("XYXXY") * 2}
    )
    df["medium"] = df["medium"].astype("category")
    gcat = df.groupby(["artist", "medium"], observed=False)["a"].count().unstack()
    result = gcat.describe()
    exp_columns = CategoricalIndex(["A", "B"], ordered=False, name="medium")
    tm.assert_index_equal(result.columns, exp_columns)
    tm.assert_categorical_equal(result.columns.values, exp_columns.values)
    result = gcat["A"] + gcat["B"]
    expected = Series([6, 4], index=Index(["X", "Y"], name="artist"))
    tm.assert_series_equal(result, expected)
def test_bins_unequal_len():
    """Grouping a Series by a grouper of a different length raises ValueError."""
    # GH3011
    series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
    bins = pd.cut(series.dropna().values, 4)
    # len(bins) != len(series) here
    msg = r"Length of grouper \(8\) and axis \(10\) must be same length"
    with pytest.raises(ValueError, match=msg):
        series.groupby(bins).mean()
def test_as_index():
    """as_index=False with categorical groupers puts the keys back as
    columns (for column, callable and Series groupers alike)."""
    # GH13204
    df = DataFrame(
        {
            "cat": Categorical([1, 2, 2], [1, 2, 3]),
            "A": [10, 11, 11],
            "B": [101, 102, 103],
        }
    )
    result = df.groupby(["cat", "A"], as_index=False, observed=True).sum()
    expected = DataFrame(
        {
            "cat": Categorical([1, 2], categories=df.cat.cat.categories),
            "A": [10, 11],
            "B": [101, 205],
        },
        columns=["cat", "A", "B"],
    )
    tm.assert_frame_equal(result, expected)
    # function grouper
    f = lambda r: df.loc[r, "A"]
    result = df.groupby(["cat", f], as_index=False, observed=True).sum()
    expected = DataFrame(
        {
            "cat": Categorical([1, 2], categories=df.cat.cat.categories),
            "A": [10, 22],
            "B": [101, 205],
        },
        columns=["cat", "A", "B"],
    )
    tm.assert_frame_equal(result, expected)
    # another not in-axis grouper (conflicting names in index)
    s = Series(["a", "b", "b"], name="cat")
    result = df.groupby(["cat", s], as_index=False, observed=True).sum()
    tm.assert_frame_equal(result, expected)
    # is original index dropped?
    group_columns = ["cat", "A"]
    expected = DataFrame(
        {
            "cat": Categorical([1, 2], categories=df.cat.cat.categories),
            "A": [10, 11],
            "B": [101, 205],
        },
        columns=["cat", "A", "B"],
    )
    # the index name must not leak into the as_index=False result
    for name in [None, "X", "B"]:
        df.index = Index(list("abc"), name=name)
        result = df.groupby(group_columns, as_index=False, observed=True).sum()
        tm.assert_frame_equal(result, expected)
def test_preserve_categories():
    """The result index preserves the grouper's categories; for unordered
    categoricals sort=False keeps the order of appearance."""
    # GH-13179
    categories = list("abc")
    # ordered=True
    df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=True)})
    index = CategoricalIndex(categories, categories, ordered=True, name="A")
    tm.assert_index_equal(
        df.groupby("A", sort=True, observed=False).first().index, index
    )
    tm.assert_index_equal(
        df.groupby("A", sort=False, observed=False).first().index, index
    )
    # ordered=False
    df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=False)})
    sort_index = CategoricalIndex(categories, categories, ordered=False, name="A")
    nosort_index = CategoricalIndex(list("bac"), list("bac"), ordered=False, name="A")
    tm.assert_index_equal(
        df.groupby("A", sort=True, observed=False).first().index, sort_index
    )
    tm.assert_index_equal(
        df.groupby("A", sort=False, observed=False).first().index, nosort_index
    )
def test_preserve_categorical_dtype():
    """mean() grouped by a categorical column keeps the categorical dtype
    of the key, for both as_index=True and as_index=False."""
    # GH13743, GH13854
    df = DataFrame(
        {
            "A": [1, 2, 1, 1, 2],
            "B": [10, 16, 22, 28, 34],
            "C1": Categorical(list("abaab"), categories=list("bac"), ordered=False),
            "C2": Categorical(list("abaab"), categories=list("bac"), ordered=True),
        }
    )
    # single grouper
    exp_full = DataFrame(
        {
            "A": [2.0, 1.0, np.nan],
            "B": [25.0, 20.0, np.nan],
            "C1": Categorical(list("bac"), categories=list("bac"), ordered=False),
            "C2": Categorical(list("bac"), categories=list("bac"), ordered=True),
        }
    )
    for col in ["C1", "C2"]:
        result1 = df.groupby(by=col, as_index=False, observed=False).mean()
        result2 = df.groupby(by=col, as_index=True, observed=False).mean().reset_index()
        expected = exp_full.reindex(columns=result1.columns)
        tm.assert_frame_equal(result1, expected)
        tm.assert_frame_equal(result2, expected)
@pytest.mark.parametrize(
    "func, values",
    [
        # group -2 holds rows ["second", "fourth"], group -1 holds
        # ["first", "third"]; on the ordered categorical the within-group
        # appearance order equals the category order, so min==first and
        # max==last per group.
        ("first", ["second", "first"]),
        ("last", ["fourth", "third"]),
        ("min", ["second", "first"]),
        ("max", ["fourth", "third"]),
    ],
)
def test_preserve_on_ordered_ops(func, values):
    # gh-18502
    # preserve the categoricals on ops
    """Reductions over an ordered categorical column keep its dtype."""
    c = Categorical(["first", "second", "third", "fourth"], ordered=True)
    df = DataFrame({"payload": [-1, -2, -1, -2], "col": c})
    g = df.groupby("payload")
    result = getattr(g, func)()
    expected = DataFrame(
        {"payload": [-2, -1], "col": Series(values, dtype=c.dtype)}
    ).set_index("payload")
    tm.assert_frame_equal(result, expected)
def test_categorical_no_compress():
    """Grouping by a Categorical keeps the full category set in the result
    index (observed=False) rather than compressing to observed codes."""
    data = Series(np.random.randn(9))
    codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
    cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)
    result = data.groupby(cats, observed=False).mean()
    exp = data.groupby(codes, observed=False).mean()
    exp.index = CategoricalIndex(
        exp.index, categories=cats.categories, ordered=cats.ordered
    )
    tm.assert_series_equal(result, exp)
    # category 2 is unobserved; reindex the codes-based result to match
    codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])
    cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)
    result = data.groupby(cats, observed=False).mean()
    exp = data.groupby(codes, observed=False).mean().reindex(cats.categories)
    exp.index = CategoricalIndex(
        exp.index, categories=cats.categories, ordered=cats.ordered
    )
    tm.assert_series_equal(result, exp)
    cats = Categorical(
        ["a", "a", "a", "b", "b", "b", "c", "c", "c"],
        categories=["a", "b", "c", "d"],
        ordered=True,
    )
    data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
    result = data.groupby("b", observed=False).mean()
    result = result["a"].values
    # unobserved category "d" yields NaN
    exp = np.array([1, 2, 4, np.nan])
    tm.assert_numpy_array_equal(result, exp)
def test_groupby_empty_with_category():
    """An all-None grouper yields an empty result whose categorical column
    keeps its dtype (no coercion to float)."""
    # GH-9614
    # test fix for when group by on None resulted in
    # coercion of dtype categorical -> float
    df = DataFrame({"A": [None] * 3, "B": Categorical(["train", "train", "test"])})
    result = df.groupby("A").first()["B"]
    expected = Series(
        Categorical([], categories=["test", "train"]),
        index=Series([], dtype="object", name="A"),
        name="B",
    )
    tm.assert_series_equal(result, expected)
def test_sort():
    """Counting by a cut()-derived categorical yields a result sorted by
    bin order (so a downstream plot has a sorted x axis)."""
    # https://stackoverflow.com/questions/23814368/sorting-pandas-
    # categorical-labels-after-groupby
    # This should result in a properly sorted Series so that the plot
    # has a sorted x axis
    # self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
    df = DataFrame({"value": np.random.randint(0, 10000, 100)})
    labels = [f"{i} - {i+499}" for i in range(0, 10000, 500)]
    cat_labels = Categorical(labels, labels)
    df = df.sort_values(by=["value"], ascending=True)
    df["value_group"] = pd.cut(
        df.value, range(0, 10500, 500), right=False, labels=cat_labels
    )
    res = df.groupby(["value_group"], observed=False)["value_group"].count()
    # sorting by the numeric lower bound of each label must be a no-op
    exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
    exp.index = CategoricalIndex(exp.index, name=exp.index.name)
    tm.assert_series_equal(res, exp)
def test_sort2():
    """sort=True/False on ordered vs unordered categorical groupers: an
    ordered categorical is always returned in category order; an unordered
    one honors sort=False by keeping appearance order."""
    # dataframe groupby sort was being ignored # GH 8868
    df = DataFrame(
        [
            ["(7.5, 10]", 10, 10],
            ["(7.5, 10]", 8, 20],
            ["(2.5, 5]", 5, 30],
            ["(5, 7.5]", 6, 40],
            ["(2.5, 5]", 4, 50],
            ["(0, 2.5]", 1, 60],
            ["(5, 7.5]", 7, 70],
        ],
        columns=["range", "foo", "bar"],
    )
    df["range"] = Categorical(df["range"], ordered=True)
    index = CategoricalIndex(
        ["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"], name="range", ordered=True
    )
    expected_sort = DataFrame(
        [[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"], index=index
    )
    col = "range"
    result_sort = df.groupby(col, sort=True, observed=False).first()
    tm.assert_frame_equal(result_sort, expected_sort)
    # when categories is ordered, group is ordered by category's order
    expected_sort = result_sort
    result_sort = df.groupby(col, sort=False, observed=False).first()
    tm.assert_frame_equal(result_sort, expected_sort)
    df["range"] = Categorical(df["range"], ordered=False)
    index = CategoricalIndex(
        ["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"], name="range"
    )
    expected_sort = DataFrame(
        [[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"], index=index
    )
    # with sort=False on an unordered categorical, groups keep the order in
    # which they first appear in the data
    index = CategoricalIndex(
        ["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"],
        categories=["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"],
        name="range",
    )
    expected_nosort = DataFrame(
        [[10, 10], [5, 30], [6, 40], [1, 60]], index=index, columns=["foo", "bar"]
    )
    col = "range"
    # this is an unordered categorical, but we allow this ####
    result_sort = df.groupby(col, sort=True, observed=False).first()
    tm.assert_frame_equal(result_sort, expected_sort)
    result_nosort = df.groupby(col, sort=False, observed=False).first()
    tm.assert_frame_equal(result_nosort, expected_nosort)
def test_sort_datetimelike():
    """Same sort=True/False semantics as test_sort2, but with a categorical
    of datetimes as the grouper."""
    # GH10505
    # use same data as test_groupby_sort_categorical, which category is
    # corresponding to datetime.month
    df = DataFrame(
        {
            "dt": [
                datetime(2011, 7, 1),
                datetime(2011, 7, 1),
                datetime(2011, 2, 1),
                datetime(2011, 5, 1),
                datetime(2011, 2, 1),
                datetime(2011, 1, 1),
                datetime(2011, 5, 1),
            ],
            "foo": [10, 8, 5, 6, 4, 1, 7],
            "bar": [10, 20, 30, 40, 50, 60, 70],
        },
        columns=["dt", "foo", "bar"],
    )
    # ordered=True
    df["dt"] = Categorical(df["dt"], ordered=True)
    index = [
        datetime(2011, 1, 1),
        datetime(2011, 2, 1),
        datetime(2011, 5, 1),
        datetime(2011, 7, 1),
    ]
    result_sort = DataFrame(
        [[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"]
    )
    result_sort.index = CategoricalIndex(index, name="dt", ordered=True)
    # appearance order of the groups in the data
    index = [
        datetime(2011, 7, 1),
        datetime(2011, 2, 1),
        datetime(2011, 5, 1),
        datetime(2011, 1, 1),
    ]
    result_nosort = DataFrame(
        [[10, 10], [5, 30], [6, 40], [1, 60]], columns=["foo", "bar"]
    )
    result_nosort.index = CategoricalIndex(
        index, categories=index, name="dt", ordered=True
    )
    col = "dt"
    tm.assert_frame_equal(
        result_sort, df.groupby(col, sort=True, observed=False).first()
    )
    # when categories is ordered, group is ordered by category's order
    tm.assert_frame_equal(
        result_sort, df.groupby(col, sort=False, observed=False).first()
    )
    # ordered = False
    df["dt"] = Categorical(df["dt"], ordered=False)
    index = [
        datetime(2011, 1, 1),
        datetime(2011, 2, 1),
        datetime(2011, 5, 1),
        datetime(2011, 7, 1),
    ]
    result_sort = DataFrame(
        [[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"]
    )
    result_sort.index = CategoricalIndex(index, name="dt")
    index = [
        datetime(2011, 7, 1),
        datetime(2011, 2, 1),
        datetime(2011, 5, 1),
        datetime(2011, 1, 1),
    ]
    result_nosort = DataFrame(
        [[10, 10], [5, 30], [6, 40], [1, 60]], columns=["foo", "bar"]
    )
    result_nosort.index = CategoricalIndex(index, categories=index, name="dt")
    col = "dt"
    tm.assert_frame_equal(
        result_sort, df.groupby(col, sort=True, observed=False).first()
    )
    # unordered + sort=False keeps appearance order
    tm.assert_frame_equal(
        result_nosort, df.groupby(col, sort=False, observed=False).first()
    )
def test_empty_sum():
    # https://github.com/pandas-dev/pandas/issues/18678
    # sum over a categorical grouper with an empty ("c") group: 0 by
    # default, NaN once min_count exceeds the group's size.
    frame = DataFrame(
        {"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]}
    )
    expected_idx = CategoricalIndex(["a", "b", "c"], name="A")
    grouped = frame.groupby("A", observed=False).B

    # default and explicit min_count=0: empty group sums to 0
    for kwargs in ({}, {"min_count": 0}):
        tm.assert_series_equal(
            grouped.sum(**kwargs), Series([3, 1, 0], expected_idx, name="B")
        )

    # min_count=1: the empty "c" group becomes NaN
    tm.assert_series_equal(
        grouped.sum(min_count=1), Series([3, 1, np.nan], expected_idx, name="B")
    )

    # min_count=2: only "a" has at least two rows
    tm.assert_series_equal(
        grouped.sum(min_count=2), Series([3, np.nan, np.nan], expected_idx, name="B")
    )
def test_empty_prod():
    # https://github.com/pandas-dev/pandas/issues/18678
    # prod over a categorical grouper with an empty ("c") group: 1 by
    # default, NaN once min_count requires at least one row.
    frame = DataFrame(
        {"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]}
    )
    expected_idx = CategoricalIndex(["a", "b", "c"], name="A")
    grouped = frame.groupby("A", observed=False).B

    # default and explicit min_count=0: empty group has product 1
    for kwargs in ({}, {"min_count": 0}):
        tm.assert_series_equal(
            grouped.prod(**kwargs), Series([2, 1, 1], expected_idx, name="B")
        )

    # min_count=1: the empty "c" group becomes NaN
    tm.assert_series_equal(
        grouped.prod(min_count=1), Series([2, 1, np.nan], expected_idx, name="B")
    )
def test_groupby_multiindex_categorical_datetime():
    """Grouping by a categorical key and a categorical-datetime key yields
    the full cartesian product in the result index."""
    # https://github.com/pandas-dev/pandas/issues/21390
    df = DataFrame(
        {
            # 9 rows; letters chosen so the per-(letter, timestamp) means
            # match ``expected`` below, with (c, second timestamp) unobserved
            "key1": Categorical(list("abcbabcba")),
            "key2": Categorical(
                list(pd.date_range("2018-06-01 00", freq="1T", periods=3)) * 3
            ),
            "values": np.arange(9),
        }
    )
    result = df.groupby(["key1", "key2"]).mean()
    idx = MultiIndex.from_product(
        [
            Categorical(["a", "b", "c"]),
            Categorical(pd.date_range("2018-06-01 00", freq="1T", periods=3)),
        ],
        names=["key1", "key2"],
    )
    expected = DataFrame({"values": [0, 4, 8, 3, 4, 5, 6, np.nan, 2]}, index=idx)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"as_index, expected",
[
(
True,
Series(
index=MultiIndex.from_arrays(
[Series([1, 1, 2], dtype="category"), [1, 2, 2]], names=["a", "b"]
),
data=[1, 2, 3],
name="x",
),
),
(
False,
DataFrame(
{
"a": Series([1, 1, 2], dtype="category"),
"b": [1, 2, 2],
"x": [1, 2, 3],
}
),
),
],
)
def test_groupby_agg_observed_true_single_column(as_index, expected):
    """Single-column sum with observed=True respects as_index (Series vs
    DataFrame result, parametrized above)."""
    # GH-23970
    df = DataFrame(
        {"a": Series([1, 1, 2], dtype="category"), "b": [1, 2, 2], "x": [1, 2, 3]}
    )
    result = df.groupby(["a", "b"], as_index=as_index, observed=True)["x"].sum()
    tm.assert_equal(result, expected)
@pytest.mark.parametrize("fill_value", [None, np.nan, pd.NaT])
def test_shift(fill_value):
ct = Categorical(
["a", "b", "c", "d"], categories=["a", "b", "c", "d"], ordered=False
)
expected = Categorical(
[None, "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False
)
res = ct.shift(1, fill_value=fill_value)
tm.assert_equal(res, expected)
@pytest.fixture
def df_cat(df):
    """
    DataFrame with multiple categorical columns and a column of integers.
    Shortened so as not to contain all possible combinations of categories.
    Useful for testing `observed` kwarg functionality on GroupBy objects.
    Parameters
    ----------
    df: DataFrame
        Non-categorical, longer DataFrame from another fixture, used to derive
        this one
    Returns
    -------
    df_cat: DataFrame
    """
    # keep only the first four rows so some category combinations are unobserved
    shortened = df.copy()[:4]
    for col in ("A", "B"):
        shortened[col] = shortened[col].astype("category")
    shortened["C"] = Series([1, 2, 3, 4])
    return shortened.drop(["D"], axis=1)
@pytest.mark.parametrize(
"operation, kwargs", [("agg", {"dtype": "category"}), ("apply", {})]
)
def test_seriesgroupby_observed_true(df_cat, operation, kwargs):
# GH 24880
index = MultiIndex.from_frame(
DataFrame(
{"A": ["foo", "foo", "bar", "bar"], "B": ["one", "two", "one", "three"]},
**kwargs,
)
)
expected = Series(data=[1, 3, 2, 4], index=index, name="C")
grouped = df_cat.groupby(["A", "B"], observed=True)["C"]
result = getattr(grouped, operation)(sum)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("operation", ["agg", "apply"])
@pytest.mark.parametrize("observed", [False, None])
def test_seriesgroupby_observed_false_or_none(df_cat, observed, operation):
# GH 24880
index, _ = MultiIndex.from_product(
[
CategoricalIndex(["bar", "foo"], ordered=False),
CategoricalIndex(["one", "three", "two"], ordered=False),
],
names=["A", "B"],
).sortlevel()
expected = Series(data=[2, 4, np.nan, 1, np.nan, 3], index=index, name="C")
if operation == "agg":
expected = expected.fillna(0, downcast="infer")
grouped = df_cat.groupby(["A", "B"], observed=observed)["C"]
result = getattr(grouped, operation)(sum)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
    "observed, index, data",
    [
        # observed=True: only the four (A, B) combinations present in df_cat
        # appear, each expanded into "min"/"max" rows by the dict return value.
        (
            True,
            MultiIndex.from_tuples(
                [
                    ("foo", "one", "min"),
                    ("foo", "one", "max"),
                    ("foo", "two", "min"),
                    ("foo", "two", "max"),
                    ("bar", "one", "min"),
                    ("bar", "one", "max"),
                    ("bar", "three", "min"),
                    ("bar", "three", "max"),
                ],
                names=["A", "B", None],
            ),
            [1, 1, 3, 3, 2, 2, 4, 4],
        ),
        # observed=False: full cartesian product of the category levels;
        # unobserved (A, B) combinations are filled with NaN.
        (
            False,
            MultiIndex.from_product(
                [
                    CategoricalIndex(["bar", "foo"], ordered=False),
                    CategoricalIndex(["one", "three", "two"], ordered=False),
                    Index(["min", "max"]),
                ],
                names=["A", "B", None],
            ),
            [2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],
        ),
        # observed=None: behaves the same as observed=False (the default).
        (
            None,
            MultiIndex.from_product(
                [
                    CategoricalIndex(["bar", "foo"], ordered=False),
                    CategoricalIndex(["one", "three", "two"], ordered=False),
                    Index(["min", "max"]),
                ],
                names=["A", "B", None],
            ),
            [2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],
        ),
    ],
)
def test_seriesgroupby_observed_apply_dict(df_cat, observed, index, data):
    # GH 24880
    # Applying a function that returns a dict must expand the dict keys
    # ("min"/"max") into an extra innermost index level.
    expected = Series(data=data, index=index, name="C")
    result = df_cat.groupby(["A", "B"], observed=observed)["C"].apply(
        lambda x: {"min": x.min(), "max": x.max()}
    )
    tm.assert_series_equal(result, expected)
def test_groupby_categorical_series_dataframe_consistent(df_cat):
# GH 20416
expected = df_cat.groupby(["A", "B"])["C"].mean()
result = df_cat.groupby(["A", "B"]).mean()["C"]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("code", [([1, 0, 0]), ([0, 0, 0])])
def test_groupby_categorical_axis_1(code):
    # GH 13420
    # Grouping the columns (axis=1) by a Categorical must match transposing,
    # grouping the rows, and transposing back.
    frame = DataFrame({"a": [1, 2, 3, 4], "b": [-1, -2, -3, -4], "c": [5, 6, 7, 8]})
    grouper = Categorical.from_codes(code, categories=list("abc"))
    expected = frame.T.groupby(grouper, axis=0).mean().T
    result = frame.groupby(grouper, axis=1).mean()
    tm.assert_frame_equal(result, expected)
def test_groupby_cat_preserves_structure(observed, ordered):
# GH 28787
df = DataFrame(
{"Name": Categorical(["Bob", "Greg"], ordered=ordered), "Item": [1, 2]},
columns=["Name", "Item"],
)
expected = df.copy()
result = (
df.groupby("Name", observed=observed)
.agg(DataFrame.sum, skipna=True)
.reset_index()
)
tm.assert_frame_equal(result, expected)
def test_get_nonexistent_category():
    # Accessing a column ("vau") that does not exist inside an applied
    # function must propagate the KeyError to the caller.
    frame = DataFrame({"var": ["a", "a", "b", "b"], "val": range(4)})

    def pick_last(rows):
        return DataFrame(
            {"var": [rows.iloc[-1]["var"]], "val": [rows.iloc[-1]["vau"]]}
        )

    with pytest.raises(KeyError, match="'vau'"):
        frame.groupby("var").apply(pick_last)
def test_series_groupby_on_2_categoricals_unobserved(reduction_func, observed, request):
# GH 17605
if reduction_func == "ngroup":
pytest.skip("ngroup is not truly a reduction")
if reduction_func == "corrwith": # GH 32293
mark = pytest.mark.xfail(
reason="TODO: implemented SeriesGroupBy.corrwith. See GH 32293"
)
request.node.add_marker(mark)
df = DataFrame(
{
"cat_1": Categorical(list("AABB"), categories=list("ABCD")),
"cat_2": Categorical(list("AB") * 2, categories=list("ABCD")),
"value": [0.1] * 4,
}
)
args = {"nth": [0]}.get(reduction_func, [])
expected_length = 4 if observed else 16
series_groupby = df.groupby(["cat_1", "cat_2"], observed=observed)["value"]
agg = getattr(series_groupby, reduction_func)
result = agg(*args)
assert len(result) == expected_length
def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans(
    reduction_func, request
):
    # GH 17605
    # Unobserved category combinations must come back filled with the
    # reduction's canonical "missing" value (0 or NaN).
    if reduction_func == "ngroup":
        pytest.skip("ngroup is not truly a reduction")
    if reduction_func == "corrwith":  # GH 32293
        request.node.add_marker(
            pytest.mark.xfail(
                reason="TODO: implemented SeriesGroupBy.corrwith. See GH 32293"
            )
        )
    frame = DataFrame(
        {
            "cat_1": Categorical(list("AABB"), categories=list("ABC")),
            "cat_2": Categorical(list("AB") * 2, categories=list("ABC")),
            "value": [0.1] * 4,
        }
    )
    extra_args = {"nth": [0]}.get(reduction_func, [])
    grouped = frame.groupby(["cat_1", "cat_2"], observed=False)["value"]
    result = getattr(grouped, reduction_func)(*extra_args)

    zero_or_nan = _results_for_groupbys_with_missing_categories[reduction_func]
    for combo in (tuple(pair) for pair in ("AC", "BC", "CA", "CB", "CC")):
        value = result.loc[combo]
        assert (pd.isna(zero_or_nan) and pd.isna(value)) or (value == zero_or_nan)
    # If we expect unobserved values to be zero, we also expect the dtype to be int.
    # Except for .sum(). If the observed categories sum to dtype=float (i.e. their
    # sums have decimals), then the zeros for the missing categories should also be
    # floats.
    if zero_or_nan == 0 and reduction_func != "sum":
        assert np.issubdtype(result.dtype, np.integer)
def test_dataframe_groupby_on_2_categoricals_when_observed_is_true(reduction_func):
# GH 23865
# GH 27075
# Ensure that df.groupby, when 'by' is two Categorical variables,
# does not return the categories that are not in df when observed=True
if reduction_func == "ngroup":
pytest.skip("ngroup does not return the Categories on the index")
df = DataFrame(
{
"cat_1": Categorical(list("AABB"), categories=list("ABC")),
"cat_2": Categorical(list("1111"), categories=list("12")),
"value": [0.1, 0.1, 0.1, 0.1],
}
)
unobserved_cats = [("A", "2"), ("B", "2"), ("C", "1"), ("C", "2")]
df_grp = df.groupby(["cat_1", "cat_2"], observed=True)
args = {"nth": [0], "corrwith": [df]}.get(reduction_func, [])
res = getattr(df_grp, reduction_func)(*args)
for cat in unobserved_cats:
assert cat not in res.index
@pytest.mark.parametrize("observed", [False, None])
def test_dataframe_groupby_on_2_categoricals_when_observed_is_false(
    reduction_func, observed, request
):
    # GH 23865
    # GH 27075
    # With observed=False/None the result must include the category
    # combinations absent from the data, filled with the reduction's
    # canonical "missing" value (0 or NaN).
    if reduction_func == "ngroup":
        pytest.skip("ngroup does not return the Categories on the index")
    frame = DataFrame(
        {
            "cat_1": Categorical(list("AABB"), categories=list("ABC")),
            "cat_2": Categorical(list("1111"), categories=list("12")),
            "value": [0.1, 0.1, 0.1, 0.1],
        }
    )
    grouped = frame.groupby(["cat_1", "cat_2"], observed=observed)
    extra_args = {"nth": [0], "corrwith": [frame]}.get(reduction_func, [])
    res = getattr(grouped, reduction_func)(*extra_args)

    missing_combos = [("A", "2"), ("B", "2"), ("C", "1"), ("C", "2")]
    fill = _results_for_groupbys_with_missing_categories[reduction_func]
    if fill is np.nan:
        assert res.loc[missing_combos].isnull().all().all()
    else:
        assert (res.loc[missing_combos] == fill).all().all()
def test_series_groupby_categorical_aggregation_getitem():
    # GH 8870
    # Column selection before and after aggregation must agree when one of
    # the groupers is an interval Categorical produced by pd.cut.
    frame = DataFrame(
        {"foo": [10, 8, 4, 1], "bar": [10, 20, 30, 40], "baz": ["d", "c", "d", "c"]}
    )
    frame["range"] = pd.cut(frame["foo"], np.linspace(0, 20, 5))
    grouped = frame.groupby(["range", "baz"], as_index=True, sort=True)
    tm.assert_series_equal(grouped["foo"].agg("mean"), grouped.agg("mean")["foo"])
@pytest.mark.parametrize(
    "func, expected_values",
    [(Series.nunique, [1, 1, 2]), (Series.count, [1, 2, 2])],
)
def test_groupby_agg_categorical_columns(func, expected_values):
    # 31256
    # nunique/count aggregation over a categorical column must work and
    # yield the per-group values of "groups".
    data = {
        "id": [0, 1, 2, 3, 4],
        "groups": [0, 1, 1, 2, 2],
        "value": Categorical([0, 0, 0, 0, 1]),
    }
    frame = DataFrame(data).set_index("id")
    expected = DataFrame(
        {"value": expected_values}, index=Index([0, 1, 2], name="groups")
    )
    tm.assert_frame_equal(frame.groupby("groups").agg(func), expected)
def test_groupby_agg_non_numeric():
    # nunique on a categorical column must agree between .agg(Series.nunique)
    # and the dedicated .nunique() method.
    frame = DataFrame({"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"])})
    grouper = [1, 2, 1]
    expected = DataFrame({"A": [2, 1]}, index=[1, 2])
    tm.assert_frame_equal(frame.groupby(grouper).agg(Series.nunique), expected)
    tm.assert_frame_equal(frame.groupby(grouper).nunique(), expected)
@pytest.mark.parametrize("func", ["first", "last"])
def test_groupy_first_returned_categorical_instead_of_dataframe(func):
# GH 28641: groupby drops index, when grouping over categorical column with
# first/last. Renamed Categorical instead of DataFrame previously.
df = DataFrame({"A": [1997], "B": Series(["b"], dtype="category").cat.as_ordered()})
df_grouped = df.groupby("A")["B"]
result = getattr(df_grouped, func)()
expected = Series(["b"], index=Index([1997], name="A"), name="B")
tm.assert_series_equal(result, expected)
def test_read_only_category_no_sort():
# GH33410
cats = np.array([1, 2])
cats.flags.writeable = False
df = DataFrame(
{"a": [1, 3, 5, 7], "b": Categorical([1, 1, 2, 2], categories=Index(cats))}
)
expected = DataFrame(data={"a": [2, 6]}, index=CategoricalIndex([1, 2], name="b"))
result = df.groupby("b", sort=False).mean()
tm.assert_frame_equal(result, expected)
def test_sorted_missing_category_values():
    # GH 28597
    # size().unstack() over an ordered categorical grouper must emit the
    # columns in category order and include the never-observed "tiny".
    df = DataFrame(
        {
            "foo": [
                "small",
                "large",
                "large",
                "large",
                "medium",
                "large",
                "large",
                "medium",
            ],
            "bar": ["C", "A", "A", "C", "A", "C", "A", "C"],
        }
    )
    # "tiny" is a valid category but never appears in the data
    df["foo"] = (
        df["foo"]
        .astype("category")
        .cat.set_categories(["tiny", "small", "medium", "large"], ordered=True)
    )
    # per-(bar, foo) group sizes; "tiny" must show up as an all-zero column
    expected = DataFrame(
        {
            "tiny": {"A": 0, "C": 0},
            "small": {"A": 0, "C": 1},
            "medium": {"A": 1, "C": 1},
            "large": {"A": 3, "C": 2},
        }
    )
    expected = expected.rename_axis("bar", axis="index")
    expected.columns = CategoricalIndex(
        ["tiny", "small", "medium", "large"],
        categories=["tiny", "small", "medium", "large"],
        ordered=True,
        name="foo",
        dtype="category",
    )
    result = df.groupby(["bar", "foo"]).size().unstack()
    tm.assert_frame_equal(result, expected)
def test_agg_cython_category_not_implemented_fallback():
# https://github.com/pandas-dev/pandas/issues/31450
df = DataFrame({"col_num": [1, 1, 2, 3]})
df["col_cat"] = df["col_num"].astype("category")
result = df.groupby("col_num").col_cat.first()
expected = Series([1, 2, 3], index=Index([1, 2, 3], name="col_num"), name="col_cat")
tm.assert_series_equal(result, expected)
result = df.groupby("col_num").agg({"col_cat": "first"})
expected = expected.to_frame()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("func", ["min", "max"])
def test_aggregate_categorical_lost_index(func: str):
# GH: 28641 groupby drops index, when grouping over categorical column with min/max
ds = Series(["b"], dtype="category").cat.as_ordered()
df = DataFrame({"A": [1997], "B": ds})
result = df.groupby("A").agg({"B": func})
expected = DataFrame({"B": ["b"]}, index=Index([1997], name="A"))
tm.assert_frame_equal(result, expected)
def test_aggregate_categorical_with_isnan():
# GH 29837
df = DataFrame(
{
"A": [1, 1, 1, 1],
"B": [1, 2, 1, 2],
"numerical_col": [0.1, 0.2, np.nan, 0.3],
"object_col": ["foo", "bar", "foo", "fee"],
"categorical_col": ["foo", "bar", "foo", "fee"],
}
)
df = df.astype({"categorical_col": "category"})
result = df.groupby(["A", "B"]).agg(lambda df: df.isna().sum())
index = pd.MultiIndex.from_arrays([[1, 1], [1, 2]], names=("A", "B"))
expected = DataFrame(
data={
"numerical_col": [1.0, 0.0],
"object_col": [0, 0],
"categorical_col": [0, 0],
},
index=index,
)
tm.assert_frame_equal(result, expected)
def test_categorical_transform():
# GH 29037
df = DataFrame(
{
"package_id": [1, 1, 1, 2, 2, 3],
"status": [
"Waiting",
"OnTheWay",
"Delivered",
"Waiting",
"OnTheWay",
"Waiting",
],
}
)
delivery_status_type = pd.CategoricalDtype(
categories=["Waiting", "OnTheWay", "Delivered"], ordered=True
)
df["status"] = df["status"].astype(delivery_status_type)
df["last_status"] = df.groupby("package_id")["status"].transform(max)
result = df.copy()
expected = DataFrame(
{
"package_id": [1, 1, 1, 2, 2, 3],
"status": [
"Waiting",
"OnTheWay",
"Delivered",
"Waiting",
"OnTheWay",
"Waiting",
],
"last_status": [
"Delivered",
"Delivered",
"Delivered",
"OnTheWay",
"OnTheWay",
"Waiting",
],
}
)
expected["status"] = expected["status"].astype(delivery_status_type)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("func", ["first", "last"])
def test_series_groupby_first_on_categorical_col_grouped_on_2_categoricals(
func: str, observed: bool
):
# GH 34951
cat = Categorical([0, 0, 1, 1])
val = [0, 1, 1, 0]
df = DataFrame({"a": cat, "b": cat, "c": val})
idx = Categorical([0, 1])
idx = pd.MultiIndex.from_product([idx, idx], names=["a", "b"])
expected_dict = {
"first": Series([0, np.NaN, np.NaN, 1], idx, name="c"),
"last": Series([1, np.NaN, np.NaN, 0], idx, name="c"),
}
expected = expected_dict[func]
if observed:
expected = expected.dropna().astype(np.int64)
srs_grp = df.groupby(["a", "b"], observed=observed)["c"]
result = getattr(srs_grp, func)()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("func", ["first", "last"])
def test_df_groupby_first_on_categorical_col_grouped_on_2_categoricals(
func: str, observed: bool
):
# GH 34951
cat = Categorical([0, 0, 1, 1])
val = [0, 1, 1, 0]
df = DataFrame({"a": cat, "b": cat, "c": val})
idx = Categorical([0, 1])
idx = pd.MultiIndex.from_product([idx, idx], names=["a", "b"])
expected_dict = {
"first": Series([0, np.NaN, np.NaN, 1], idx, name="c"),
"last": Series([1, np.NaN, np.NaN, 0], idx, name="c"),
}
expected = expected_dict[func].to_frame()
if observed:
expected = expected.dropna().astype(np.int64)
df_grp = df.groupby(["a", "b"], observed=observed)
result = getattr(df_grp, func)()
tm.assert_frame_equal(result, expected)
| [
"pandas._testing.assert_equal",
"numpy.sum",
"pandas._testing.assert_dict_equal",
"numpy.random.randint",
"numpy.arange",
"numpy.mean",
"pytest.mark.parametrize",
"pandas._testing.assert_numpy_array_equal",
"pandas.CategoricalIndex",
"pandas.DataFrame",
"numpy.random.randn",
"pandas.Categorica... | [((7791, 7840), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ordered"""', '[True, False]'], {}), "('ordered', [True, False])\n", (7814, 7840), False, 'import pytest\n'), ((16907, 16956), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ordered"""', '[True, False]'], {}), "('ordered', [True, False])\n", (16930, 16956), False, 'import pytest\n'), ((16959, 17009), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""observed"""', '[True, False]'], {}), "('observed', [True, False])\n", (16982, 17009), False, 'import pytest\n'), ((17012, 17058), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sort"""', '[True, False]'], {}), "('sort', [True, False])\n", (17035, 17058), False, 'import pytest\n'), ((26162, 26336), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func, values"""', "[('first', ['second', 'first']), ('last', ['fourth', 'third']), ('min', [\n 'fourth', 'first']), ('max', ['second', 'third'])]"], {}), "('func, values', [('first', ['second', 'first']), (\n 'last', ['fourth', 'third']), ('min', ['fourth', 'first']), ('max', [\n 'second', 'third'])])\n", (26185, 26336), False, 'import pytest\n'), ((37917, 37978), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fill_value"""', '[None, np.nan, pd.NaT]'], {}), "('fill_value', [None, np.nan, pd.NaT])\n", (37940, 37978), False, 'import pytest\n'), ((39057, 39155), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""operation, kwargs"""', "[('agg', {'dtype': 'category'}), ('apply', {})]"], {}), "('operation, kwargs', [('agg', {'dtype': 'category'}\n ), ('apply', {})])\n", (39080, 39155), False, 'import pytest\n'), ((39651, 39705), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""operation"""', "['agg', 'apply']"], {}), "('operation', ['agg', 'apply'])\n", (39674, 39705), False, 'import pytest\n'), ((39708, 39758), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""observed"""', '[False, None]'], {}), 
"('observed', [False, None])\n", (39731, 39758), False, 'import pytest\n'), ((42525, 42580), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""code"""', '[[1, 0, 0], [0, 0, 0]]'], {}), "('code', [[1, 0, 0], [0, 0, 0]])\n", (42548, 42580), False, 'import pytest\n'), ((47304, 47354), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""observed"""', '[False, None]'], {}), "('observed', [False, None])\n", (47327, 47354), False, 'import pytest\n'), ((48972, 49083), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func, expected_values"""', '[(Series.nunique, [1, 1, 2]), (Series.count, [1, 2, 2])]'], {}), "('func, expected_values', [(Series.nunique, [1, 1, 2\n ]), (Series.count, [1, 2, 2])])\n", (48995, 49083), False, 'import pytest\n'), ((49950, 50000), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', "['first', 'last']"], {}), "('func', ['first', 'last'])\n", (49973, 50000), False, 'import pytest\n'), ((52679, 52726), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', "['min', 'max']"], {}), "('func', ['min', 'max'])\n", (52702, 52726), False, 'import pytest\n'), ((55262, 55312), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', "['first', 'last']"], {}), "('func', ['first', 'last'])\n", (55285, 55312), False, 'import pytest\n'), ((56080, 56130), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""func"""', "['first', 'last']"], {}), "('func', ['first', 'last'])\n", (56103, 56130), False, 'import pytest\n'), ((1881, 1894), 'pandas.qcut', 'qcut', (['df.C', '(4)'], {}), '(df.C, 4)\n', (1885, 1894), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((2237, 2346), 'pandas.Categorical', 'Categorical', (["['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c']"], {'categories': "['a', 'b', 'c', 'd']", 'ordered': '(True)'}), "(['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'], categories=['a',\n 'b', 'c', 'd'], ordered=True)\n", (2248, 
2346), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((2390, 2446), 'pandas.DataFrame', 'DataFrame', (["{'a': [1, 1, 1, 2, 2, 2, 3, 4, 5], 'b': cats}"], {}), "({'a': [1, 1, 1, 2, 2, 2, 3, 4, 5], 'b': cats})\n", (2399, 2446), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((2537, 2589), 'pandas.DataFrame', 'DataFrame', (["{'a': [1, 2, 4, np.nan]}"], {'index': 'exp_index'}), "({'a': [1, 2, 4, np.nan]}, index=exp_index)\n", (2546, 2589), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((2650, 2689), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (2671, 2689), True, 'import pandas._testing as tm\n'), ((2704, 2779), 'pandas.Categorical', 'Categorical', (["['a', 'a', 'b', 'b']"], {'categories': "['a', 'b', 'z']", 'ordered': '(True)'}), "(['a', 'a', 'b', 'b'], categories=['a', 'b', 'z'], ordered=True)\n", (2715, 2779), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((2792, 2867), 'pandas.Categorical', 'Categorical', (["['c', 'd', 'c', 'd']"], {'categories': "['c', 'd', 'y']", 'ordered': '(True)'}), "(['c', 'd', 'c', 'd'], categories=['c', 'd', 'y'], ordered=True)\n", (2803, 2867), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((2878, 2935), 'pandas.DataFrame', 'DataFrame', (["{'A': cat1, 'B': cat2, 'values': [1, 2, 3, 4]}"], {}), "({'A': cat1, 'B': cat2, 'values': [1, 2, 3, 4]})\n", (2887, 2935), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((3017, 3074), 'pandas.CategoricalIndex', 'CategoricalIndex', (["['a', 'b', 'z']"], {'name': '"""A"""', 'ordered': '(True)'}), "(['a', 'b', 'z'], name='A', ordered=True)\n", (3033, 3074), False, 'from pandas import 
Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((3175, 3214), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (3196, 3214), True, 'import pandas._testing as tm\n'), ((3241, 3340), 'pandas.DataFrame', 'DataFrame', (["[[1, '<NAME>'], [2, '<NAME>'], [1, '<NAME>']]"], {'columns': "['person_id', 'person_name']"}), "([[1, '<NAME>'], [2, '<NAME>'], [1, '<NAME>']], columns=[\n 'person_id', 'person_name'])\n", (3250, 3340), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((3386, 3412), 'pandas.Categorical', 'Categorical', (['x.person_name'], {}), '(x.person_name)\n', (3397, 3412), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((3509, 3558), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', "x[['person_name']]"], {}), "(result, x[['person_name']])\n", (3530, 3558), True, 'import pandas._testing as tm\n'), ((3644, 3683), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (3665, 3683), True, 'import pandas._testing as tm\n'), ((3845, 3876), 'pandas.Index', 'Index', (['[1, 2]'], {'name': '"""person_id"""'}), "([1, 2], name='person_id')\n", (3850, 3876), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((3954, 3993), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (3975, 3993), True, 'import pandas._testing as tm\n'), ((4038, 4067), 'pandas.DataFrame', 'DataFrame', (["{'a': [5, 15, 25]}"], {}), "({'a': [5, 15, 25]})\n", (4047, 4067), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((4077, 4115), 'pandas.cut', 'pd.cut', (['df.a'], {'bins': '[0, 10, 20, 30, 40]'}), '(df.a, bins=[0, 10, 20, 30, 
40])\n', (4083, 4115), True, 'import pandas as pd\n'), ((4184, 4223), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', "df['a']"], {}), "(result, df['a'])\n", (4206, 4223), True, 'import pandas._testing as tm\n'), ((4758, 4791), 'pandas.DataFrame', 'DataFrame', (["{'a': [5, 15, 25, -5]}"], {}), "({'a': [5, 15, 25, -5]})\n", (4767, 4791), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((4801, 4844), 'pandas.cut', 'pd.cut', (['df.a'], {'bins': '[-10, 0, 10, 20, 30, 40]'}), '(df.a, bins=[-10, 0, 10, 20, 30, 40])\n', (4807, 4844), True, 'import pandas as pd\n'), ((4913, 4952), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', "df['a']"], {}), "(result, df['a'])\n", (4935, 4952), True, 'import pandas._testing as tm\n'), ((5303, 5333), 'pandas.DataFrame', 'DataFrame', (["{'a': [1, 0, 0, 0]}"], {}), "({'a': [1, 0, 0, 0]})\n", (5312, 5333), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((5481, 5544), 'pandas.CategoricalIndex', 'CategoricalIndex', (['c.values.categories'], {'ordered': 'c.values.ordered'}), '(c.values.categories, ordered=c.values.ordered)\n', (5497, 5544), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((5561, 5598), 'pandas.Series', 'Series', (['[1, 0, 0, 0]'], {'index': 'exp_index'}), '([1, 0, 0, 0], index=exp_index)\n', (5567, 5598), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((5635, 5675), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (5657, 5675), True, 'import pandas._testing as tm\n'), ((5752, 5785), 'numpy.random.randint', 'np.random.randint', (['(0)', '(4)'], {'size': '(100)'}), '(0, 4, size=100)\n', (5769, 5785), True, 'import numpy as np\n'), ((5800, 5851), 
'pandas.Categorical.from_codes', 'Categorical.from_codes', (['codes', 'levels'], {'ordered': '(True)'}), '(codes, levels, ordered=True)\n', (5822, 5851), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((6046, 6112), 'pandas.CategoricalIndex', 'CategoricalIndex', (['levels'], {'categories': 'cats.categories', 'ordered': '(True)'}), '(levels, categories=cats.categories, ordered=True)\n', (6062, 6112), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((6162, 6201), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (6183, 6201), True, 'import pandas._testing as tm\n'), ((6420, 6498), 'pandas.Categorical', 'Categorical', (['ord_labels'], {'ordered': '(True)', 'categories': "['foo', 'bar', 'baz', 'qux']"}), "(ord_labels, ordered=True, categories=['foo', 'bar', 'baz', 'qux'])\n", (6431, 6498), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((6602, 6646), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['desc_result', 'expected'], {}), '(desc_result, expected)\n', (6623, 6646), True, 'import pandas._testing as tm\n'), ((6757, 6779), 'pandas.CategoricalIndex', 'CategoricalIndex', (['expc'], {}), '(expc)\n', (6773, 6779), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((6872, 6942), 'pandas.Index', 'Index', (["(['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'] * 4)"], {}), "(['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'] * 4)\n", (6877, 6942), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((7745, 7784), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (7766, 7784), True, 'import pandas._testing as tm\n'), ((8120, 
8185), 'pandas.DataFrame', 'DataFrame', (["{'missing': missing, 'dense': dense, 'values': values}"], {}), "({'missing': missing, 'dense': dense, 'values': values})\n", (8129, 8185), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((8329, 8397), 'pandas.MultiIndex.from_arrays', 'MultiIndex.from_arrays', (['[missing, dense]'], {'names': "['missing', 'dense']"}), "([missing, dense], names=['missing', 'dense'])\n", (8351, 8397), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((8414, 8467), 'pandas.DataFrame', 'DataFrame', (['[0, 1, 2.0]'], {'index': 'idx', 'columns': "['values']"}), "([0, 1, 2.0], index=idx, columns=['values'])\n", (8423, 8467), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((8739, 8778), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (8760, 8778), True, 'import pandas._testing as tm\n'), ((8884, 8923), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (8905, 8923), True, 'import pandas._testing as tm\n'), ((8966, 9005), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (8987, 9005), True, 'import pandas._testing as tm\n'), ((9088, 9156), 'pandas.MultiIndex.from_arrays', 'MultiIndex.from_arrays', (['[missing, dense]'], {'names': "['missing', 'dense']"}), "([missing, dense], names=['missing', 'dense'])\n", (9110, 9156), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((9173, 9193), 'pandas.Series', 'Series', (['(1)'], {'index': 'idx'}), '(1, index=idx)\n', (9179, 9193), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((9240, 9280), 
'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (9262, 9280), True, 'import pandas._testing as tm\n'), ((9512, 9587), 'pandas.Categorical', 'Categorical', (["['a', 'a', 'b', 'b']"], {'categories': "['a', 'b', 'z']", 'ordered': '(True)'}), "(['a', 'a', 'b', 'b'], categories=['a', 'b', 'z'], ordered=True)\n", (9523, 9587), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((9600, 9675), 'pandas.Categorical', 'Categorical', (["['c', 'd', 'c', 'd']"], {'categories': "['c', 'd', 'y']", 'ordered': '(True)'}), "(['c', 'd', 'c', 'd'], categories=['c', 'd', 'y'], ordered=True)\n", (9611, 9675), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((9686, 9743), 'pandas.DataFrame', 'DataFrame', (["{'A': cat1, 'B': cat2, 'values': [1, 2, 3, 4]}"], {}), "({'A': cat1, 'B': cat2, 'values': [1, 2, 3, 4]})\n", (9695, 9743), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((9894, 9973), 'pandas.MultiIndex.from_arrays', 'MultiIndex.from_arrays', (["[cat1, cat2, ['foo', 'bar'] * 2]"], {'names': "['A', 'B', 'C']"}), "([cat1, cat2, ['foo', 'bar'] * 2], names=['A', 'B', 'C'])\n", (9916, 9973), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((10274, 10313), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (10295, 10313), True, 'import pandas._testing as tm\n'), ((10385, 10439), 'pandas.MultiIndex.from_arrays', 'MultiIndex.from_arrays', (['[cat1, cat2]'], {'names': "['A', 'B']"}), "([cat1, cat2], names=['A', 'B'])\n", (10407, 10439), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((10456, 10508), 'pandas.DataFrame', 'DataFrame', (["{'values': [1, 2, 3, 4]}"], {'index': 
'exp_index'}), "({'values': [1, 2, 3, 4]}, index=exp_index)\n", (10465, 10508), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((10686, 10725), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (10707, 10725), True, 'import pandas._testing as tm\n'), ((10994, 11006), 'pandas.DataFrame', 'DataFrame', (['d'], {}), '(d)\n', (11003, 11006), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((11275, 11342), 'pandas.DataFrame', 'DataFrame', (["{'ints': [1.5, 1.5], 'val': [20.0, 30]}"], {'index': 'exp_index'}), "({'ints': [1.5, 1.5], 'val': [20.0, 30]}, index=exp_index)\n", (11284, 11342), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((11537, 11576), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (11558, 11576), True, 'import pandas._testing as tm\n'), ((12167, 12206), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (12188, 12206), True, 'import pandas._testing as tm\n'), ((12663, 12675), 'pandas.DataFrame', 'DataFrame', (['d'], {}), '(d)\n', (12672, 12675), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((12997, 13036), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (13018, 13036), True, 'import pandas._testing as tm\n'), ((13169, 13181), 'pandas.DataFrame', 'DataFrame', (['d'], {}), '(d)\n', (13178, 13181), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((13196, 13226), 'pandas.cut', 'pd.cut', (["df['C1']", '[1, 2, 3, 6]'], {}), "(df['C1'], [1, 2, 3, 6])\n", (13202, 13226), True, 'import pandas as pd\n'), 
((13336, 13403), 'pandas.MultiIndex.from_arrays', 'MultiIndex.from_arrays', (['[values, [1, 2, 3, 4]]'], {'names': "['cat', 'C2']"}), "([values, [1, 2, 3, 4]], names=['cat', 'C2'])\n", (13358, 13403), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((13420, 13488), 'pandas.DataFrame', 'DataFrame', (["{'C1': [3, 3, 4, 5], 'C3': [10, 100, 200, 34]}"], {'index': 'idx'}), "({'C1': [3, 3, 4, 5], 'C3': [10, 100, 200, 34]}, index=idx)\n", (13429, 13488), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((13693, 13732), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (13714, 13732), True, 'import pandas._testing as tm\n'), ((14623, 14679), 'pandas.Categorical', 'Categorical', (["['a', 'c', 'a']"], {'categories': "['a', 'b', 'c']"}), "(['a', 'c', 'a'], categories=['a', 'b', 'c'])\n", (14634, 14679), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((14690, 14732), 'pandas.DataFrame', 'DataFrame', (["{'cat': cat, 'vals': [1, 2, 3]}"], {}), "({'cat': cat, 'vals': [1, 2, 3]})\n", (14699, 14732), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((15098, 15136), 'pandas._testing.assert_dict_equal', 'tm.assert_dict_equal', (['result', 'expected'], {}), '(result, expected)\n', (15118, 15136), True, 'import pandas._testing as tm\n'), ((15694, 15732), 'pandas._testing.assert_dict_equal', 'tm.assert_dict_equal', (['result', 'expected'], {}), '(result, expected)\n', (15714, 15732), True, 'import pandas._testing as tm\n'), ((15790, 15852), 'pandas.Categorical', 'Categorical', (["['a', np.nan, np.nan]"], {'categories': "['a', 'b', 'c']"}), "(['a', np.nan, np.nan], categories=['a', 'b', 'c'])\n", (15801, 15852), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, 
MultiIndex, Series, qcut\n'), ((15864, 15881), 'pandas.Series', 'Series', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (15870, 15881), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((15892, 15927), 'pandas.DataFrame', 'DataFrame', (["{'cat': cat, 'ser': ser}"], {}), "({'cat': cat, 'ser': ser})\n", (15901, 15927), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((16007, 16063), 'pandas.Categorical', 'Categorical', (["['a', 'b', 'c']"], {'categories': "['a', 'b', 'c']"}), "(['a', 'b', 'c'], categories=['a', 'b', 'c'])\n", (16018, 16063), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((16080, 16132), 'pandas.Series', 'Series', (['[1, np.nan, np.nan]'], {'index': 'index', 'name': '"""ser"""'}), "([1, np.nan, np.nan], index=index, name='ser')\n", (16086, 16132), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((16173, 16213), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (16195, 16213), True, 'import pandas._testing as tm\n'), ((16296, 16363), 'pandas.Categorical', 'Categorical', (["[np.nan, 'a', np.nan, 'a']"], {'categories': "['a', 'b', 'c']"}), "([np.nan, 'a', np.nan, 'a'], categories=['a', 'b', 'c'])\n", (16307, 16363), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((16374, 16394), 'pandas.Series', 'Series', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (16380, 16394), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((16405, 16436), 'pandas.DataFrame', 'DataFrame', (["{'s1': s1, 's2': s2}"], {}), "({'s1': s1, 's2': s2})\n", (16414, 16436), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((16861, 
16900), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (16882, 16900), True, 'import pandas._testing as tm\n'), ((17392, 17495), 'pandas.Categorical', 'Categorical', (["['d', 'a', 'b', 'a', 'd', 'b']"], {'categories': "['a', 'b', 'missing', 'd']", 'ordered': 'ordered'}), "(['d', 'a', 'b', 'a', 'd', 'b'], categories=['a', 'b', 'missing',\n 'd'], ordered=ordered)\n", (17403, 17495), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((17538, 17576), 'pandas.Series', 'Series', (["['d', 'a', 'b', 'a', 'd', 'b']"], {}), "(['d', 'a', 'b', 'a', 'd', 'b'])\n", (17544, 17576), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((17587, 17626), 'pandas.DataFrame', 'DataFrame', (["{'label': label, 'val': val}"], {}), "({'label': label, 'val': val})\n", (17596, 17626), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((17922, 17964), 'pandas.Series', 'Series', (['result.index.array'], {'dtype': '"""object"""'}), "(result.index.array, dtype='object')\n", (17928, 17964), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((17977, 17997), 'pandas.Series', 'Series', (['result.array'], {}), '(result.array)\n', (17983, 17997), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((18413, 18451), 'pandas.date_range', 'pd.date_range', (['"""2014-01-01"""'], {'periods': '(4)'}), "('2014-01-01', periods=4)\n", (18426, 18451), True, 'import pandas as pd\n'), ((18465, 18498), 'numpy.random.randint', 'np.random.randint', (['(0)', '(4)'], {'size': '(100)'}), '(0, 4, size=100)\n', (18482, 18498), True, 'import numpy as np\n'), ((18513, 18564), 'pandas.Categorical.from_codes', 'Categorical.from_codes', (['codes', 'levels'], {'ordered': '(True)'}), 
'(codes, levels, ordered=True)\n', (18535, 18564), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((18805, 18878), 'pandas.CategoricalIndex', 'CategoricalIndex', (['expected.index'], {'categories': 'expected.index', 'ordered': '(True)'}), '(expected.index, categories=expected.index, ordered=True)\n', (18821, 18878), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((18902, 18941), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (18923, 18941), True, 'import pandas._testing as tm\n'), ((19207, 19251), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['desc_result', 'expected'], {}), '(desc_result, expected)\n', (19228, 19251), True, 'import pandas._testing as tm\n'), ((19257, 19313), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['desc_result.index', 'expected.index'], {}), '(desc_result.index, expected.index)\n', (19278, 19313), True, 'import pandas._testing as tm\n'), ((19542, 19564), 'pandas.CategoricalIndex', 'CategoricalIndex', (['expc'], {}), '(expc)\n', (19558, 19564), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((19657, 19727), 'pandas.Index', 'Index', (["(['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'] * 4)"], {}), "(['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'] * 4)\n", (19662, 19727), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((19855, 19883), 'numpy.random.RandomState', 'np.random.RandomState', (['(12345)'], {}), '(12345)\n', (19876, 19883), True, 'import numpy as np\n'), ((19977, 20028), 'pandas.Categorical.from_codes', 'Categorical.from_codes', (['codes', 'levels'], {'ordered': '(True)'}), '(codes, levels, ordered=True)\n', (19999, 20028), False, 'from pandas import Categorical, 
CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((20446, 20485), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (20467, 20485), True, 'import pandas._testing as tm\n'), ((20804, 20843), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (20825, 20843), True, 'import pandas._testing as tm\n'), ((20918, 21023), 'pandas.CategoricalIndex', 'CategoricalIndex', (["['qux', 'foo', 'baz', 'bar']"], {'categories': "['foo', 'bar', 'baz', 'qux']", 'ordered': '(True)'}), "(['qux', 'foo', 'baz', 'bar'], categories=['foo', 'bar',\n 'baz', 'qux'], ordered=True)\n", (20934, 21023), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((21696, 21754), 'pandas.CategoricalIndex', 'CategoricalIndex', (["['A', 'B']"], {'ordered': '(False)', 'name': '"""medium"""'}), "(['A', 'B'], ordered=False, name='medium')\n", (21712, 21754), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((21760, 21810), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['result.columns', 'exp_columns'], {}), '(result.columns, exp_columns)\n', (21781, 21810), True, 'import pandas._testing as tm\n'), ((21816, 21886), 'pandas._testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['result.columns.values', 'exp_columns.values'], {}), '(result.columns.values, exp_columns.values)\n', (21843, 21886), True, 'import pandas._testing as tm\n'), ((22001, 22041), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (22023, 22041), True, 'import pandas._testing as tm\n'), ((22104, 22152), 'pandas.Series', 'Series', (['[np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4]'], {}), '([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])\n', (22110, 22152), False, 'from pandas import Categorical, 
CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((22917, 22956), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (22938, 22956), True, 'import pandas._testing as tm\n'), ((23322, 23361), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (23343, 23361), True, 'import pandas._testing as tm\n'), ((23437, 23472), 'pandas.Series', 'Series', (["['a', 'b', 'b']"], {'name': '"""cat"""'}), "(['a', 'b', 'b'], name='cat')\n", (23443, 23472), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((23552, 23591), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (23573, 23591), True, 'import pandas._testing as tm\n'), ((24314, 24378), 'pandas.CategoricalIndex', 'CategoricalIndex', (['categories', 'categories'], {'ordered': '(True)', 'name': '"""A"""'}), "(categories, categories, ordered=True, name='A')\n", (24330, 24378), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((24727, 24792), 'pandas.CategoricalIndex', 'CategoricalIndex', (['categories', 'categories'], {'ordered': '(False)', 'name': '"""A"""'}), "(categories, categories, ordered=False, name='A')\n", (24743, 24792), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((26499, 26564), 'pandas.Categorical', 'Categorical', (["['first', 'second', 'third', 'fourth']"], {'ordered': '(True)'}), "(['first', 'second', 'third', 'fourth'], ordered=True)\n", (26510, 26564), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((26575, 26625), 'pandas.DataFrame', 'DataFrame', (["{'payload': [-1, -2, -1, -2], 'col': c}"], {}), "({'payload': [-1, -2, -1, -2], 'col': c})\n", (26584, 26625), False, 'from 
pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((26819, 26858), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (26840, 26858), True, 'import pandas._testing as tm\n'), ((26954, 26991), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 2, 2, 2]'], {}), '([0, 0, 0, 1, 1, 1, 2, 2, 2])\n', (26962, 26991), True, 'import numpy as np\n'), ((27004, 27058), 'pandas.Categorical.from_codes', 'Categorical.from_codes', (['codes', '[0, 1, 2]'], {'ordered': '(True)'}), '(codes, [0, 1, 2], ordered=True)\n', (27026, 27058), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((27190, 27267), 'pandas.CategoricalIndex', 'CategoricalIndex', (['exp.index'], {'categories': 'cats.categories', 'ordered': 'cats.ordered'}), '(exp.index, categories=cats.categories, ordered=cats.ordered)\n', (27206, 27267), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((27289, 27324), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'exp'], {}), '(result, exp)\n', (27311, 27324), True, 'import pandas._testing as tm\n'), ((27340, 27377), 'numpy.array', 'np.array', (['[0, 0, 0, 1, 1, 1, 3, 3, 3]'], {}), '([0, 0, 0, 1, 1, 1, 3, 3, 3])\n', (27348, 27377), True, 'import numpy as np\n'), ((27390, 27447), 'pandas.Categorical.from_codes', 'Categorical.from_codes', (['codes', '[0, 1, 2, 3]'], {'ordered': '(True)'}), '(codes, [0, 1, 2, 3], ordered=True)\n', (27412, 27447), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((27602, 27679), 'pandas.CategoricalIndex', 'CategoricalIndex', (['exp.index'], {'categories': 'cats.categories', 'ordered': 'cats.ordered'}), '(exp.index, categories=cats.categories, ordered=cats.ordered)\n', (27618, 27679), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, 
Index, MultiIndex, Series, qcut\n'), ((27701, 27736), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'exp'], {}), '(result, exp)\n', (27723, 27736), True, 'import pandas._testing as tm\n'), ((27751, 27860), 'pandas.Categorical', 'Categorical', (["['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c']"], {'categories': "['a', 'b', 'c', 'd']", 'ordered': '(True)'}), "(['a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'c'], categories=['a',\n 'b', 'c', 'd'], ordered=True)\n", (27762, 27860), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((27904, 27960), 'pandas.DataFrame', 'DataFrame', (["{'a': [1, 1, 1, 2, 2, 2, 3, 4, 5], 'b': cats}"], {}), "({'a': [1, 1, 1, 2, 2, 2, 3, 4, 5], 'b': cats})\n", (27913, 27960), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((28062, 28089), 'numpy.array', 'np.array', (['[1, 2, 4, np.nan]'], {}), '([1, 2, 4, np.nan])\n', (28070, 28089), True, 'import numpy as np\n'), ((28095, 28135), 'pandas._testing.assert_numpy_array_equal', 'tm.assert_numpy_array_equal', (['result', 'exp'], {}), '(result, exp)\n', (28122, 28135), True, 'import pandas._testing as tm\n'), ((28588, 28628), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (28610, 28628), True, 'import pandas._testing as tm\n'), ((29095, 29122), 'pandas.Categorical', 'Categorical', (['labels', 'labels'], {}), '(labels, labels)\n', (29106, 29122), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((29458, 29506), 'pandas.CategoricalIndex', 'CategoricalIndex', (['exp.index'], {'name': 'exp.index.name'}), '(exp.index, name=exp.index.name)\n', (29474, 29506), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((29512, 29544), 'pandas._testing.assert_series_equal', 
'tm.assert_series_equal', (['res', 'exp'], {}), '(res, exp)\n', (29534, 29544), True, 'import pandas._testing as tm\n'), ((29636, 29839), 'pandas.DataFrame', 'DataFrame', (["[['(7.5, 10]', 10, 10], ['(7.5, 10]', 8, 20], ['(2.5, 5]', 5, 30], [\n '(5, 7.5]', 6, 40], ['(2.5, 5]', 4, 50], ['(0, 2.5]', 1, 60], [\n '(5, 7.5]', 7, 70]]"], {'columns': "['range', 'foo', 'bar']"}), "([['(7.5, 10]', 10, 10], ['(7.5, 10]', 8, 20], ['(2.5, 5]', 5, 30],\n ['(5, 7.5]', 6, 40], ['(2.5, 5]', 4, 50], ['(0, 2.5]', 1, 60], [\n '(5, 7.5]', 7, 70]], columns=['range', 'foo', 'bar'])\n", (29645, 29839), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((29979, 30017), 'pandas.Categorical', 'Categorical', (["df['range']"], {'ordered': '(True)'}), "(df['range'], ordered=True)\n", (29990, 30017), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((30031, 30131), 'pandas.CategoricalIndex', 'CategoricalIndex', (["['(0, 2.5]', '(2.5, 5]', '(5, 7.5]', '(7.5, 10]']"], {'name': '"""range"""', 'ordered': '(True)'}), "(['(0, 2.5]', '(2.5, 5]', '(5, 7.5]', '(7.5, 10]'], name=\n 'range', ordered=True)\n", (30047, 30131), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((30164, 30253), 'pandas.DataFrame', 'DataFrame', (['[[1, 60], [5, 30], [6, 40], [10, 10]]'], {'columns': "['foo', 'bar']", 'index': 'index'}), "([[1, 60], [5, 30], [6, 40], [10, 10]], columns=['foo', 'bar'],\n index=index)\n", (30173, 30253), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((30362, 30411), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result_sort', 'expected_sort'], {}), '(result_sort, expected_sort)\n', (30383, 30411), True, 'import pandas._testing as tm\n'), ((30595, 30644), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result_sort', 'expected_sort'], 
{}), '(result_sort, expected_sort)\n', (30616, 30644), True, 'import pandas._testing as tm\n'), ((30666, 30705), 'pandas.Categorical', 'Categorical', (["df['range']"], {'ordered': '(False)'}), "(df['range'], ordered=False)\n", (30677, 30705), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((30719, 30805), 'pandas.CategoricalIndex', 'CategoricalIndex', (["['(0, 2.5]', '(2.5, 5]', '(5, 7.5]', '(7.5, 10]']"], {'name': '"""range"""'}), "(['(0, 2.5]', '(2.5, 5]', '(5, 7.5]', '(7.5, 10]'], name=\n 'range')\n", (30735, 30805), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((30838, 30927), 'pandas.DataFrame', 'DataFrame', (['[[1, 60], [5, 30], [6, 40], [10, 10]]'], {'columns': "['foo', 'bar']", 'index': 'index'}), "([[1, 60], [5, 30], [6, 40], [10, 10]], columns=['foo', 'bar'],\n index=index)\n", (30847, 30927), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((30955, 31102), 'pandas.CategoricalIndex', 'CategoricalIndex', (["['(7.5, 10]', '(2.5, 5]', '(5, 7.5]', '(0, 2.5]']"], {'categories': "['(7.5, 10]', '(2.5, 5]', '(5, 7.5]', '(0, 2.5]']", 'name': '"""range"""'}), "(['(7.5, 10]', '(2.5, 5]', '(5, 7.5]', '(0, 2.5]'],\n categories=['(7.5, 10]', '(2.5, 5]', '(5, 7.5]', '(0, 2.5]'], name='range')\n", (30971, 31102), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((31157, 31247), 'pandas.DataFrame', 'DataFrame', (['[[10, 10], [5, 30], [6, 40], [1, 60]]'], {'index': 'index', 'columns': "['foo', 'bar']"}), "([[10, 10], [5, 30], [6, 40], [1, 60]], index=index, columns=[\n 'foo', 'bar'])\n", (31166, 31247), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((31421, 31470), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result_sort', 'expected_sort'], {}), '(result_sort, 
expected_sort)\n', (31442, 31470), True, 'import pandas._testing as tm\n'), ((31551, 31604), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result_nosort', 'expected_nosort'], {}), '(result_nosort, expected_nosort)\n', (31572, 31604), True, 'import pandas._testing as tm\n'), ((32301, 32336), 'pandas.Categorical', 'Categorical', (["df['dt']"], {'ordered': '(True)'}), "(df['dt'], ordered=True)\n", (32312, 32336), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((32502, 32574), 'pandas.DataFrame', 'DataFrame', (['[[1, 60], [5, 30], [6, 40], [10, 10]]'], {'columns': "['foo', 'bar']"}), "([[1, 60], [5, 30], [6, 40], [10, 10]], columns=['foo', 'bar'])\n", (32511, 32574), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((32616, 32664), 'pandas.CategoricalIndex', 'CategoricalIndex', (['index'], {'name': '"""dt"""', 'ordered': '(True)'}), "(index, name='dt', ordered=True)\n", (32632, 32664), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((32834, 32906), 'pandas.DataFrame', 'DataFrame', (['[[10, 10], [5, 30], [6, 40], [1, 60]]'], {'columns': "['foo', 'bar']"}), "([[10, 10], [5, 30], [6, 40], [1, 60]], columns=['foo', 'bar'])\n", (32843, 32906), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((32950, 33016), 'pandas.CategoricalIndex', 'CategoricalIndex', (['index'], {'categories': 'index', 'name': '"""dt"""', 'ordered': '(True)'}), "(index, categories=index, name='dt', ordered=True)\n", (32966, 33016), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((33383, 33419), 'pandas.Categorical', 'Categorical', (["df['dt']"], {'ordered': '(False)'}), "(df['dt'], ordered=False)\n", (33394, 33419), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, 
MultiIndex, Series, qcut\n'), ((33585, 33657), 'pandas.DataFrame', 'DataFrame', (['[[1, 60], [5, 30], [6, 40], [10, 10]]'], {'columns': "['foo', 'bar']"}), "([[1, 60], [5, 30], [6, 40], [10, 10]], columns=['foo', 'bar'])\n", (33594, 33657), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((33699, 33733), 'pandas.CategoricalIndex', 'CategoricalIndex', (['index'], {'name': '"""dt"""'}), "(index, name='dt')\n", (33715, 33733), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((33903, 33975), 'pandas.DataFrame', 'DataFrame', (['[[10, 10], [5, 30], [6, 40], [1, 60]]'], {'columns': "['foo', 'bar']"}), "([[10, 10], [5, 30], [6, 40], [1, 60]], columns=['foo', 'bar'])\n", (33912, 33975), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((34019, 34071), 'pandas.CategoricalIndex', 'CategoricalIndex', (['index'], {'categories': 'index', 'name': '"""dt"""'}), "(index, categories=index, name='dt')\n", (34035, 34071), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((34530, 34573), 'pandas.CategoricalIndex', 'CategoricalIndex', (["['a', 'b', 'c']"], {'name': '"""A"""'}), "(['a', 'b', 'c'], name='A')\n", (34546, 34573), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((34666, 34707), 'pandas.Series', 'Series', (['[3, 1, 0]', 'expected_idx'], {'name': '"""B"""'}), "([3, 1, 0], expected_idx, name='B')\n", (34672, 34707), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((34713, 34753), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (34735, 34753), True, 'import pandas._testing as tm\n'), ((34856, 34897), 'pandas.Series', 'Series', (['[3, 1, 0]', 'expected_idx'], {'name': 
'"""B"""'}), "([3, 1, 0], expected_idx, name='B')\n", (34862, 34897), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((34903, 34943), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (34925, 34943), True, 'import pandas._testing as tm\n'), ((35046, 35092), 'pandas.Series', 'Series', (['[3, 1, np.nan]', 'expected_idx'], {'name': '"""B"""'}), "([3, 1, np.nan], expected_idx, name='B')\n", (35052, 35092), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((35098, 35138), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (35120, 35138), True, 'import pandas._testing as tm\n'), ((35241, 35292), 'pandas.Series', 'Series', (['[3, np.nan, np.nan]', 'expected_idx'], {'name': '"""B"""'}), "([3, np.nan, np.nan], expected_idx, name='B')\n", (35247, 35292), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((35298, 35338), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (35320, 35338), True, 'import pandas._testing as tm\n'), ((35563, 35606), 'pandas.CategoricalIndex', 'CategoricalIndex', (["['a', 'b', 'c']"], {'name': '"""A"""'}), "(['a', 'b', 'c'], name='A')\n", (35579, 35606), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((35700, 35741), 'pandas.Series', 'Series', (['[2, 1, 1]', 'expected_idx'], {'name': '"""B"""'}), "([2, 1, 1], expected_idx, name='B')\n", (35706, 35741), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((35747, 35787), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (35769, 35787), True, 'import 
pandas._testing as tm\n'), ((35891, 35932), 'pandas.Series', 'Series', (['[2, 1, 1]', 'expected_idx'], {'name': '"""B"""'}), "([2, 1, 1], expected_idx, name='B')\n", (35897, 35932), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((35938, 35978), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (35960, 35978), True, 'import pandas._testing as tm\n'), ((36082, 36128), 'pandas.Series', 'Series', (['[2, 1, np.nan]', 'expected_idx'], {'name': '"""B"""'}), "([2, 1, np.nan], expected_idx, name='B')\n", (36088, 36128), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((36134, 36174), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (36156, 36174), True, 'import pandas._testing as tm\n'), ((36847, 36913), 'pandas.DataFrame', 'DataFrame', (["{'values': [0, 4, 8, 3, 4, 5, 6, np.nan, 2]}"], {'index': 'idx'}), "({'values': [0, 4, 8, 3, 4, 5, 6, np.nan, 2]}, index=idx)\n", (36856, 36913), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((36919, 36958), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (36940, 36958), True, 'import pandas._testing as tm\n'), ((37877, 37910), 'pandas._testing.assert_equal', 'tm.assert_equal', (['result', 'expected'], {}), '(result, expected)\n', (37892, 37910), True, 'import pandas._testing as tm\n'), ((38018, 38104), 'pandas.Categorical', 'Categorical', (["['a', 'b', 'c', 'd']"], {'categories': "['a', 'b', 'c', 'd']", 'ordered': '(False)'}), "(['a', 'b', 'c', 'd'], categories=['a', 'b', 'c', 'd'], ordered=\n False)\n", (38029, 38104), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((38132, 38219), 
'pandas.Categorical', 'Categorical', (["[None, 'a', 'b', 'c']"], {'categories': "['a', 'b', 'c', 'd']", 'ordered': '(False)'}), "([None, 'a', 'b', 'c'], categories=['a', 'b', 'c', 'd'], ordered\n =False)\n", (38143, 38219), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((38282, 38312), 'pandas._testing.assert_equal', 'tm.assert_equal', (['res', 'expected'], {}), '(res, expected)\n', (38297, 38312), True, 'import pandas._testing as tm\n'), ((38970, 38990), 'pandas.Series', 'Series', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (38976, 38990), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((39441, 39489), 'pandas.Series', 'Series', ([], {'data': '[1, 3, 2, 4]', 'index': 'index', 'name': '"""C"""'}), "(data=[1, 3, 2, 4], index=index, name='C')\n", (39447, 39489), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((39604, 39644), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (39626, 39644), True, 'import pandas._testing as tm\n'), ((40113, 40177), 'pandas.Series', 'Series', ([], {'data': '[2, 4, np.nan, 1, np.nan, 3]', 'index': 'index', 'name': '"""C"""'}), "(data=[2, 4, np.nan, 1, np.nan, 3], index=index, name='C')\n", (40119, 40177), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((40381, 40421), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (40403, 40421), True, 'import pandas._testing as tm\n'), ((42060, 42100), 'pandas.Series', 'Series', ([], {'data': 'data', 'index': 'index', 'name': '"""C"""'}), "(data=data, index=index, name='C')\n", (42066, 42100), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((42237, 42277), 
'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (42259, 42277), True, 'import pandas._testing as tm\n'), ((42478, 42518), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (42500, 42518), True, 'import pandas._testing as tm\n'), ((42655, 42727), 'pandas.DataFrame', 'DataFrame', (["{'a': [1, 2, 3, 4], 'b': [-1, -2, -3, -4], 'c': [5, 6, 7, 8]}"], {}), "({'a': [1, 2, 3, 4], 'b': [-1, -2, -3, -4], 'c': [5, 6, 7, 8]})\n", (42664, 42727), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((42893, 42932), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (42914, 42932), True, 'import pandas._testing as tm\n'), ((43331, 43370), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (43352, 43370), True, 'import pandas._testing as tm\n'), ((48681, 48693), 'pandas.DataFrame', 'DataFrame', (['d'], {}), '(d)\n', (48690, 48693), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((48925, 48965), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (48947, 48965), True, 'import pandas._testing as tm\n'), ((49527, 49566), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (49548, 49566), True, 'import pandas._testing as tm\n'), ((49709, 49747), 'pandas.DataFrame', 'DataFrame', (["{'A': [2, 1]}"], {'index': '[1, 2]'}), "({'A': [2, 1]}, index=[1, 2])\n", (49718, 49747), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((49811, 49850), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], 
{}), '(result, expected)\n', (49832, 49850), True, 'import pandas._testing as tm\n'), ((49904, 49943), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (49925, 49943), True, 'import pandas._testing as tm\n'), ((50473, 50513), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (50495, 50513), True, 'import pandas._testing as tm\n'), ((50585, 50601), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (50593, 50601), True, 'import numpy as np\n'), ((50891, 50930), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (50912, 50930), True, 'import pandas._testing as tm\n'), ((51005, 51152), 'pandas.DataFrame', 'DataFrame', (["{'foo': ['small', 'large', 'large', 'large', 'medium', 'large', 'large',\n 'medium'], 'bar': ['C', 'A', 'A', 'C', 'A', 'C', 'A', 'C']}"], {}), "({'foo': ['small', 'large', 'large', 'large', 'medium', 'large',\n 'large', 'medium'], 'bar': ['C', 'A', 'A', 'C', 'A', 'C', 'A', 'C']})\n", (51014, 51152), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((51528, 51652), 'pandas.DataFrame', 'DataFrame', (["{'tiny': {'A': 0, 'C': 0}, 'small': {'A': 0, 'C': 1}, 'medium': {'A': 1,\n 'C': 1}, 'large': {'A': 3, 'C': 2}}"], {}), "({'tiny': {'A': 0, 'C': 0}, 'small': {'A': 0, 'C': 1}, 'medium': {\n 'A': 1, 'C': 1}, 'large': {'A': 3, 'C': 2}})\n", (51537, 51652), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((51810, 51961), 'pandas.CategoricalIndex', 'CategoricalIndex', (["['tiny', 'small', 'medium', 'large']"], {'categories': "['tiny', 'small', 'medium', 'large']", 'ordered': '(True)', 'name': '"""foo"""', 'dtype': '"""category"""'}), "(['tiny', 'small', 'medium', 'large'], categories=['tiny',\n 'small', 'medium', 'large'], ordered=True, 
name='foo', dtype='category')\n", (51826, 51961), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((52078, 52117), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (52099, 52117), True, 'import pandas._testing as tm\n'), ((52247, 52283), 'pandas.DataFrame', 'DataFrame', (["{'col_num': [1, 1, 2, 3]}"], {}), "({'col_num': [1, 1, 2, 3]})\n", (52256, 52283), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((52487, 52527), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (52509, 52527), True, 'import pandas._testing as tm\n'), ((52633, 52672), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (52654, 52672), True, 'import pandas._testing as tm\n'), ((52940, 52973), 'pandas.DataFrame', 'DataFrame', (["{'A': [1997], 'B': ds}"], {}), "({'A': [1997], 'B': ds})\n", (52949, 52973), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((53097, 53136), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (53118, 53136), True, 'import pandas._testing as tm\n'), ((53213, 53405), 'pandas.DataFrame', 'DataFrame', (["{'A': [1, 1, 1, 1], 'B': [1, 2, 1, 2], 'numerical_col': [0.1, 0.2, np.nan, \n 0.3], 'object_col': ['foo', 'bar', 'foo', 'fee'], 'categorical_col': [\n 'foo', 'bar', 'foo', 'fee']}"], {}), "({'A': [1, 1, 1, 1], 'B': [1, 2, 1, 2], 'numerical_col': [0.1, 0.2,\n np.nan, 0.3], 'object_col': ['foo', 'bar', 'foo', 'fee'],\n 'categorical_col': ['foo', 'bar', 'foo', 'fee']})\n", (53222, 53405), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((53630, 53691), 'pandas.MultiIndex.from_arrays', 
'pd.MultiIndex.from_arrays', (['[[1, 1], [1, 2]]'], {'names': "('A', 'B')"}), "([[1, 1], [1, 2]], names=('A', 'B'))\n", (53655, 53691), True, 'import pandas as pd\n'), ((53708, 53819), 'pandas.DataFrame', 'DataFrame', ([], {'data': "{'numerical_col': [1.0, 0.0], 'object_col': [0, 0], 'categorical_col': [0, 0]}", 'index': 'index'}), "(data={'numerical_col': [1.0, 0.0], 'object_col': [0, 0],\n 'categorical_col': [0, 0]}, index=index)\n", (53717, 53819), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((53898, 53937), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (53919, 53937), True, 'import pandas._testing as tm\n'), ((54003, 54134), 'pandas.DataFrame', 'DataFrame', (["{'package_id': [1, 1, 1, 2, 2, 3], 'status': ['Waiting', 'OnTheWay',\n 'Delivered', 'Waiting', 'OnTheWay', 'Waiting']}"], {}), "({'package_id': [1, 1, 1, 2, 2, 3], 'status': ['Waiting',\n 'OnTheWay', 'Delivered', 'Waiting', 'OnTheWay', 'Waiting']})\n", (54012, 54134), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((54333, 54419), 'pandas.CategoricalDtype', 'pd.CategoricalDtype', ([], {'categories': "['Waiting', 'OnTheWay', 'Delivered']", 'ordered': '(True)'}), "(categories=['Waiting', 'OnTheWay', 'Delivered'],\n ordered=True)\n", (54352, 54419), True, 'import pandas as pd\n'), ((54611, 54841), 'pandas.DataFrame', 'DataFrame', (["{'package_id': [1, 1, 1, 2, 2, 3], 'status': ['Waiting', 'OnTheWay',\n 'Delivered', 'Waiting', 'OnTheWay', 'Waiting'], 'last_status': [\n 'Delivered', 'Delivered', 'Delivered', 'OnTheWay', 'OnTheWay', 'Waiting']}"], {}), "({'package_id': [1, 1, 1, 2, 2, 3], 'status': ['Waiting',\n 'OnTheWay', 'Delivered', 'Waiting', 'OnTheWay', 'Waiting'],\n 'last_status': ['Delivered', 'Delivered', 'Delivered', 'OnTheWay',\n 'OnTheWay', 'Waiting']})\n", (54620, 54841), False, 'from pandas import 
Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((55216, 55255), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (55237, 55255), True, 'import pandas._testing as tm\n'), ((55452, 55477), 'pandas.Categorical', 'Categorical', (['[0, 0, 1, 1]'], {}), '([0, 0, 1, 1])\n', (55463, 55477), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((55512, 55553), 'pandas.DataFrame', 'DataFrame', (["{'a': cat, 'b': cat, 'c': val}"], {}), "({'a': cat, 'b': cat, 'c': val})\n", (55521, 55553), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((55567, 55586), 'pandas.Categorical', 'Categorical', (['[0, 1]'], {}), '([0, 1])\n', (55578, 55586), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((55598, 55654), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (['[idx, idx]'], {'names': "['a', 'b']"}), "([idx, idx], names=['a', 'b'])\n", (55624, 55654), True, 'import pandas as pd\n'), ((56033, 56073), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (56055, 56073), True, 'import pandas._testing as tm\n'), ((56266, 56291), 'pandas.Categorical', 'Categorical', (['[0, 0, 1, 1]'], {}), '([0, 0, 1, 1])\n', (56277, 56291), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((56326, 56367), 'pandas.DataFrame', 'DataFrame', (["{'a': cat, 'b': cat, 'c': val}"], {}), "({'a': cat, 'b': cat, 'c': val})\n", (56335, 56367), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((56381, 56400), 'pandas.Categorical', 'Categorical', (['[0, 1]'], {}), '([0, 1])\n', (56392, 56400), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, 
Index, MultiIndex, Series, qcut\n'), ((56412, 56468), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (['[idx, idx]'], {'names': "['a', 'b']"}), "([idx, idx], names=['a', 'b'])\n", (56438, 56468), True, 'import pandas as pd\n'), ((56851, 56890), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (56872, 56890), True, 'import pandas._testing as tm\n'), ((5876, 5899), 'numpy.random.randn', 'np.random.randn', (['(100)', '(4)'], {}), '(100, 4)\n', (5891, 5899), True, 'import numpy as np\n'), ((10811, 10886), 'pandas.Categorical', 'Categorical', (["['a', 'b', 'a', 'b']"], {'categories': "['a', 'b', 'c']", 'ordered': '(True)'}), "(['a', 'b', 'a', 'b'], categories=['a', 'b', 'c'], ordered=True)\n", (10822, 10886), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((12418, 12457), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (12439, 12457), True, 'import pandas._testing as tm\n'), ((12705, 12726), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(3)'], {}), '(0, 10, 3)\n', (12716, 12726), True, 'import numpy as np\n'), ((18589, 18612), 'numpy.random.randn', 'np.random.randn', (['(100)', '(4)'], {}), '(100, 4)\n', (18604, 18612), True, 'import numpy as np\n'), ((20362, 20420), 'pandas.Categorical.from_codes', 'Categorical.from_codes', (['[0, 1, 2, 3]', 'levels'], {'ordered': '(True)'}), '([0, 1, 2, 3], levels, ordered=True)\n', (20384, 20420), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((20720, 20778), 'pandas.Categorical.from_codes', 'Categorical.from_codes', (['[0, 1, 2, 3]', 'levels'], {'ordered': '(True)'}), '([0, 1, 2, 3], levels, ordered=True)\n', (20742, 20778), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((21075, 21097), 
'numpy.random.randn', 'np.random.randn', (['(20)', '(4)'], {}), '(20, 4)\n', (21090, 21097), True, 'import numpy as np\n'), ((22322, 22358), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (22335, 22358), False, 'import pytest\n'), ((24067, 24106), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (24088, 24106), True, 'import pandas._testing as tm\n'), ((26065, 26105), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result1', 'expected'], {}), '(result1, expected)\n', (26086, 26105), True, 'import pandas._testing as tm\n'), ((26115, 26155), 'pandas._testing.assert_frame_equal', 'tm.assert_frame_equal', (['result2', 'expected'], {}), '(result2, expected)\n', (26136, 26155), True, 'import pandas._testing as tm\n'), ((26919, 26937), 'numpy.random.randn', 'np.random.randn', (['(9)'], {}), '(9)\n', (26934, 26937), True, 'import numpy as np\n'), ((28457, 28502), 'pandas.Categorical', 'Categorical', (['[]'], {'categories': "['test', 'train']"}), "([], categories=['test', 'train'])\n", (28468, 28502), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((32361, 32381), 'datetime.datetime', 'datetime', (['(2011)', '(1)', '(1)'], {}), '(2011, 1, 1)\n', (32369, 32381), False, 'from datetime import datetime\n'), ((32392, 32412), 'datetime.datetime', 'datetime', (['(2011)', '(2)', '(1)'], {}), '(2011, 2, 1)\n', (32400, 32412), False, 'from datetime import datetime\n'), ((32423, 32443), 'datetime.datetime', 'datetime', (['(2011)', '(5)', '(1)'], {}), '(2011, 5, 1)\n', (32431, 32443), False, 'from datetime import datetime\n'), ((32454, 32474), 'datetime.datetime', 'datetime', (['(2011)', '(7)', '(1)'], {}), '(2011, 7, 1)\n', (32462, 32474), False, 'from datetime import datetime\n'), ((32691, 32711), 'datetime.datetime', 'datetime', (['(2011)', '(7)', '(1)'], {}), '(2011, 7, 1)\n', (32699, 
32711), False, 'from datetime import datetime\n'), ((32722, 32742), 'datetime.datetime', 'datetime', (['(2011)', '(2)', '(1)'], {}), '(2011, 2, 1)\n', (32730, 32742), False, 'from datetime import datetime\n'), ((32753, 32773), 'datetime.datetime', 'datetime', (['(2011)', '(5)', '(1)'], {}), '(2011, 5, 1)\n', (32761, 32773), False, 'from datetime import datetime\n'), ((32784, 32804), 'datetime.datetime', 'datetime', (['(2011)', '(1)', '(1)'], {}), '(2011, 1, 1)\n', (32792, 32804), False, 'from datetime import datetime\n'), ((33444, 33464), 'datetime.datetime', 'datetime', (['(2011)', '(1)', '(1)'], {}), '(2011, 1, 1)\n', (33452, 33464), False, 'from datetime import datetime\n'), ((33475, 33495), 'datetime.datetime', 'datetime', (['(2011)', '(2)', '(1)'], {}), '(2011, 2, 1)\n', (33483, 33495), False, 'from datetime import datetime\n'), ((33506, 33526), 'datetime.datetime', 'datetime', (['(2011)', '(5)', '(1)'], {}), '(2011, 5, 1)\n', (33514, 33526), False, 'from datetime import datetime\n'), ((33537, 33557), 'datetime.datetime', 'datetime', (['(2011)', '(7)', '(1)'], {}), '(2011, 7, 1)\n', (33545, 33557), False, 'from datetime import datetime\n'), ((33760, 33780), 'datetime.datetime', 'datetime', (['(2011)', '(7)', '(1)'], {}), '(2011, 7, 1)\n', (33768, 33780), False, 'from datetime import datetime\n'), ((33791, 33811), 'datetime.datetime', 'datetime', (['(2011)', '(2)', '(1)'], {}), '(2011, 2, 1)\n', (33799, 33811), False, 'from datetime import datetime\n'), ((33822, 33842), 'datetime.datetime', 'datetime', (['(2011)', '(5)', '(1)'], {}), '(2011, 5, 1)\n', (33830, 33842), False, 'from datetime import datetime\n'), ((33853, 33873), 'datetime.datetime', 'datetime', (['(2011)', '(1)', '(1)'], {}), '(2011, 1, 1)\n', (33861, 33873), False, 'from datetime import datetime\n'), ((39286, 39383), 'pandas.DataFrame', 'DataFrame', (["{'A': ['foo', 'foo', 'bar', 'bar'], 'B': ['one', 'two', 'one', 'three']}"], {}), "({'A': ['foo', 'foo', 'bar', 'bar'], 'B': ['one', 'two', 
'one',\n 'three']}, **kwargs)\n", (39295, 39383), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((43548, 43586), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': '"""\'vau\'"""'}), '(KeyError, match="\'vau\'")\n', (43561, 43586), False, 'import pytest\n'), ((43920, 43966), 'pytest.skip', 'pytest.skip', (['"""ngroup is not truly a reduction"""'], {}), "('ngroup is not truly a reduction')\n", (43931, 43966), False, 'import pytest\n'), ((44035, 44122), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""TODO: implemented SeriesGroupBy.corrwith. See GH 32293"""'}), "(reason=\n 'TODO: implemented SeriesGroupBy.corrwith. See GH 32293')\n", (44052, 44122), False, 'import pytest\n'), ((44964, 45010), 'pytest.skip', 'pytest.skip', (['"""ngroup is not truly a reduction"""'], {}), "('ngroup is not truly a reduction')\n", (44975, 45010), False, 'import pytest\n'), ((45079, 45166), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""TODO: implemented SeriesGroupBy.corrwith. See GH 32293"""'}), "(reason=\n 'TODO: implemented SeriesGroupBy.corrwith. 
See GH 32293')\n", (45096, 45166), False, 'import pytest\n'), ((46313, 46352), 'numpy.issubdtype', 'np.issubdtype', (['result.dtype', 'np.integer'], {}), '(result.dtype, np.integer)\n', (46326, 46352), True, 'import numpy as np\n'), ((46666, 46731), 'pytest.skip', 'pytest.skip', (['"""ngroup does not return the Categories on the index"""'], {}), "('ngroup does not return the Categories on the index')\n", (46677, 46731), False, 'import pytest\n'), ((47692, 47757), 'pytest.skip', 'pytest.skip', (['"""ngroup does not return the Categories on the index"""'], {}), "('ngroup does not return the Categories on the index')\n", (47703, 47757), False, 'import pytest\n'), ((48723, 48744), 'numpy.linspace', 'np.linspace', (['(0)', '(20)', '(5)'], {}), '(0, 20, 5)\n', (48734, 48744), True, 'import numpy as np\n'), ((55696, 55741), 'pandas.Series', 'Series', (['[0, np.NaN, np.NaN, 1]', 'idx'], {'name': '"""c"""'}), "([0, np.NaN, np.NaN, 1], idx, name='c')\n", (55702, 55741), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((55760, 55805), 'pandas.Series', 'Series', (['[1, np.NaN, np.NaN, 0]', 'idx'], {'name': '"""c"""'}), "([1, np.NaN, np.NaN, 0], idx, name='c')\n", (55766, 55805), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((56510, 56555), 'pandas.Series', 'Series', (['[0, np.NaN, np.NaN, 1]', 'idx'], {'name': '"""c"""'}), "([0, np.NaN, np.NaN, 1], idx, name='c')\n", (56516, 56555), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((56574, 56619), 'pandas.Series', 'Series', (['[1, np.NaN, np.NaN, 0]', 'idx'], {'name': '"""c"""'}), "([1, np.NaN, np.NaN, 0], idx, name='c')\n", (56580, 56619), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((3112, 3144), 'pandas.Series', 'Series', (['[3, 7, 0]'], {'index': 'exp_idx'}), '([3, 7, 0], 
index=exp_idx)\n', (3118, 3144), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((6344, 6360), 'numpy.asarray', 'np.asarray', (['cats'], {}), '(cats)\n', (6354, 6360), True, 'import numpy as np\n'), ((7115, 7134), 'numpy.arange', 'np.arange', (['(2)', '(22)', '(2)'], {}), '(2, 22, 2)\n', (7124, 7134), True, 'import numpy as np\n'), ((7497, 7516), 'numpy.arange', 'np.arange', (['(2)', '(12)', '(2)'], {}), '(2, 12, 2)\n', (7506, 7516), True, 'import numpy as np\n'), ((8722, 8732), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (8729, 8732), True, 'import numpy as np\n'), ((13935, 13972), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {'size': '(30000)'}), '(0, 255, size=30000)\n', (13952, 13972), True, 'import numpy as np\n'), ((13997, 14034), 'numpy.random.randint', 'np.random.randint', (['(0)', '(255)'], {'size': '(30000)'}), '(0, 255, size=30000)\n', (14014, 14034), True, 'import numpy as np\n'), ((14061, 14100), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10000)'], {'size': '(30000)'}), '(0, 10000, size=30000)\n', (14078, 14100), True, 'import numpy as np\n'), ((14848, 14876), 'pandas.Index', 'Index', (['[0, 2]'], {'dtype': '"""int64"""'}), "([0, 2], dtype='int64')\n", (14853, 14876), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((14883, 14908), 'pandas.Index', 'Index', (['[1]'], {'dtype': '"""int64"""'}), "([1], dtype='int64')\n", (14888, 14908), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((14961, 14989), 'pandas.Index', 'Index', (['[0, 2]'], {'dtype': '"""int64"""'}), "([0, 2], dtype='int64')\n", (14966, 14989), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((15009, 15033), 'pandas.Index', 'Index', (['[]'], {'dtype': '"""int64"""'}), "([], dtype='int64')\n", (15014, 15033), False, 'from pandas 
import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((15053, 15078), 'pandas.Index', 'Index', (['[1]'], {'dtype': '"""int64"""'}), "([1], dtype='int64')\n", (15058, 15078), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((15255, 15314), 'pandas.Categorical', 'Categorical', (["['a', np.nan, 'a']"], {'categories': "['a', 'b', 'd']"}), "(['a', np.nan, 'a'], categories=['a', 'b', 'd'])\n", (15266, 15314), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((15479, 15507), 'pandas.Index', 'Index', (['[0, 2]'], {'dtype': '"""int64"""'}), "([0, 2], dtype='int64')\n", (15484, 15507), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((15560, 15588), 'pandas.Index', 'Index', (['[0, 2]'], {'dtype': '"""int64"""'}), "([0, 2], dtype='int64')\n", (15565, 15588), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((15608, 15632), 'pandas.Index', 'Index', (['[]'], {'dtype': '"""int64"""'}), "([], dtype='int64')\n", (15613, 15632), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((15652, 15676), 'pandas.Index', 'Index', (['[]'], {'dtype': '"""int64"""'}), "([], dtype='int64')\n", (15657, 15676), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((21962, 21994), 'pandas.Index', 'Index', (["['X', 'Y']"], {'name': '"""artist"""'}), "(['X', 'Y'], name='artist')\n", (21967, 21994), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((22490, 22523), 'pandas.Categorical', 'Categorical', (['[1, 2, 2]', '[1, 2, 3]'], {}), '([1, 2, 2], [1, 2, 3])\n', (22501, 22523), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), 
((22744, 22797), 'pandas.Categorical', 'Categorical', (['[1, 2]'], {'categories': 'df.cat.cat.categories'}), '([1, 2], categories=df.cat.cat.categories)\n', (22755, 22797), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((23149, 23202), 'pandas.Categorical', 'Categorical', (['[1, 2]'], {'categories': 'df.cat.cat.categories'}), '([1, 2], categories=df.cat.cat.categories)\n', (23160, 23202), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((23720, 23773), 'pandas.Categorical', 'Categorical', (['[1, 2]'], {'categories': 'df.cat.cat.categories'}), '([1, 2], categories=df.cat.cat.categories)\n', (23731, 23773), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((28339, 28378), 'pandas.Categorical', 'Categorical', (["['train', 'train', 'test']"], {}), "(['train', 'train', 'test'])\n", (28350, 28378), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((28519, 28555), 'pandas.Series', 'Series', (['[]'], {'dtype': '"""object"""', 'name': '"""A"""'}), "([], dtype='object', name='A')\n", (28525, 28555), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((28979, 29011), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10000)', '(100)'], {}), '(0, 10000, 100)\n', (28996, 29011), True, 'import numpy as np\n'), ((34429, 34485), 'pandas.Categorical', 'Categorical', (["['a', 'a', 'b']"], {'categories': "['a', 'b', 'c']"}), "(['a', 'a', 'b'], categories=['a', 'b', 'c'])\n", (34440, 34485), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((35460, 35516), 'pandas.Categorical', 'Categorical', (["['a', 'a', 'b']"], {'categories': "['a', 'b', 'c']"}), "(['a', 'a', 'b'], categories=['a', 'b', 'c'])\n", (35471, 35516), False, 'from pandas import 
Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((36525, 36537), 'numpy.arange', 'np.arange', (['(9)'], {}), '(9)\n', (36534, 36537), True, 'import numpy as np\n'), ((36669, 36697), 'pandas.Categorical', 'Categorical', (["['a', 'b', 'c']"], {}), "(['a', 'b', 'c'])\n", (36680, 36697), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((37710, 37745), 'pandas.Series', 'Series', (['[1, 1, 2]'], {'dtype': '"""category"""'}), "([1, 1, 2], dtype='category')\n", (37716, 37745), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((40533, 40783), 'pandas.MultiIndex.from_tuples', 'MultiIndex.from_tuples', (["[('foo', 'one', 'min'), ('foo', 'one', 'max'), ('foo', 'two', 'min'), (\n 'foo', 'two', 'max'), ('bar', 'one', 'min'), ('bar', 'one', 'max'), (\n 'bar', 'three', 'min'), ('bar', 'three', 'max')]"], {'names': "['A', 'B', None]"}), "([('foo', 'one', 'min'), ('foo', 'one', 'max'), (\n 'foo', 'two', 'min'), ('foo', 'two', 'max'), ('bar', 'one', 'min'), (\n 'bar', 'one', 'max'), ('bar', 'three', 'min'), ('bar', 'three', 'max')],\n names=['A', 'B', None])\n", (40555, 40783), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((43054, 43099), 'pandas.Categorical', 'Categorical', (["['Bob', 'Greg']"], {'ordered': 'ordered'}), "(['Bob', 'Greg'], ordered=ordered)\n", (43065, 43099), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((49483, 49514), 'pandas.Index', 'Index', (['[0, 1, 2]'], {'name': '"""groups"""'}), "([0, 1, 2], name='groups')\n", (49488, 49514), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((49634, 49690), 'pandas.Categorical', 'Categorical', (["['a', 'a', 'b']"], {'categories': "['a', 'b', 'c']"}), "(['a', 'a', 'b'], categories=['a', 'b', 'c'])\n", 
(49645, 49690), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((50433, 50456), 'pandas.Index', 'Index', (['[1997]'], {'name': '"""A"""'}), "([1997], name='A')\n", (50438, 50456), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((50801, 50835), 'pandas.CategoricalIndex', 'CategoricalIndex', (['[1, 2]'], {'name': '"""b"""'}), "([1, 2], name='b')\n", (50817, 50835), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((52432, 52464), 'pandas.Index', 'Index', (['[1, 2, 3]'], {'name': '"""col_num"""'}), "([1, 2, 3], name='col_num')\n", (52437, 52464), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((53067, 53090), 'pandas.Index', 'Index', (['[1997]'], {'name': '"""A"""'}), "([1997], name='A')\n", (53072, 53090), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((4317, 4327), 'numpy.sum', 'np.sum', (['xs'], {}), '(xs)\n', (4323, 4327), True, 'import numpy as np\n'), ((4517, 4527), 'numpy.max', 'np.max', (['xs'], {}), '(xs)\n', (4523, 4527), True, 'import numpy as np\n'), ((5046, 5056), 'numpy.sum', 'np.sum', (['xs'], {}), '(xs)\n', (5052, 5056), True, 'import numpy as np\n'), ((5246, 5256), 'numpy.sum', 'np.sum', (['xs'], {}), '(xs)\n', (5252, 5256), True, 'import numpy as np\n'), ((5990, 6006), 'numpy.asarray', 'np.asarray', (['cats'], {}), '(cats)\n', (6000, 6006), True, 'import numpy as np\n'), ((6700, 6712), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (6709, 6712), True, 'import numpy as np\n'), ((16578, 16624), 'pandas.Categorical', 'Categorical', (["['a']"], {'categories': "['a', 'b', 'c']"}), "(['a'], categories=['a', 'b', 'c'])\n", (16589, 16624), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((16728, 16784), 
'pandas.Categorical', 'Categorical', (["['a', 'b', 'c']"], {'categories': "['a', 'b', 'c']"}), "(['a', 'b', 'c'], categories=['a', 'b', 'c'])\n", (16739, 16784), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((18701, 18717), 'numpy.asarray', 'np.asarray', (['cats'], {}), '(cats)\n', (18711, 18717), True, 'import numpy as np\n'), ((19485, 19497), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (19494, 19497), True, 'import numpy as np\n'), ((31839, 31859), 'datetime.datetime', 'datetime', (['(2011)', '(7)', '(1)'], {}), '(2011, 7, 1)\n', (31847, 31859), False, 'from datetime import datetime\n'), ((31878, 31898), 'datetime.datetime', 'datetime', (['(2011)', '(7)', '(1)'], {}), '(2011, 7, 1)\n', (31886, 31898), False, 'from datetime import datetime\n'), ((31917, 31937), 'datetime.datetime', 'datetime', (['(2011)', '(2)', '(1)'], {}), '(2011, 2, 1)\n', (31925, 31937), False, 'from datetime import datetime\n'), ((31956, 31976), 'datetime.datetime', 'datetime', (['(2011)', '(5)', '(1)'], {}), '(2011, 5, 1)\n', (31964, 31976), False, 'from datetime import datetime\n'), ((31995, 32015), 'datetime.datetime', 'datetime', (['(2011)', '(2)', '(1)'], {}), '(2011, 2, 1)\n', (32003, 32015), False, 'from datetime import datetime\n'), ((32034, 32054), 'datetime.datetime', 'datetime', (['(2011)', '(1)', '(1)'], {}), '(2011, 1, 1)\n', (32042, 32054), False, 'from datetime import datetime\n'), ((32073, 32093), 'datetime.datetime', 'datetime', (['(2011)', '(5)', '(1)'], {}), '(2011, 5, 1)\n', (32081, 32093), False, 'from datetime import datetime\n'), ((36724, 36776), 'pandas.date_range', 'pd.date_range', (['"""2018-06-01 00"""'], {'freq': '"""1T"""', 'periods': '(3)'}), "('2018-06-01 00', freq='1T', periods=3)\n", (36737, 36776), True, 'import pandas as pd\n'), ((43648, 43721), 'pandas.DataFrame', 'DataFrame', (["{'var': [rows.iloc[-1]['var']], 'val': [rows.iloc[-1]['vau']]}"], {}), "({'var': [rows.iloc[-1]['var']], 
'val': [rows.iloc[-1]['vau']]})\n", (43657, 43721), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((45908, 45928), 'pandas.isna', 'pd.isna', (['zero_or_nan'], {}), '(zero_or_nan)\n', (45915, 45928), True, 'import pandas as pd\n'), ((45933, 45945), 'pandas.isna', 'pd.isna', (['val'], {}), '(val)\n', (45940, 45945), True, 'import pandas as pd\n'), ((52881, 52912), 'pandas.Series', 'Series', (["['b']"], {'dtype': '"""category"""'}), "(['b'], dtype='category')\n", (52887, 52912), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((10027, 10064), 'pandas.Series', 'Series', (['[1, 2, 3, 4]'], {'index': 'exp_index'}), '([1, 2, 3, 4], index=exp_index)\n', (10033, 10064), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((11822, 11897), 'pandas.Categorical', 'Categorical', (["['a', 'a', 'b', 'b']"], {'categories': "['a', 'b', 'c']", 'ordered': '(True)'}), "(['a', 'a', 'b', 'b'], categories=['a', 'b', 'c'], ordered=True)\n", (11833, 11897), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((20059, 20072), 'numpy.arange', 'np.arange', (['(20)'], {}), '(20)\n', (20068, 20072), True, 'import numpy as np\n'), ((26755, 26784), 'pandas.Series', 'Series', (['values'], {'dtype': 'c.dtype'}), '(values, dtype=c.dtype)\n', (26761, 26784), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((37418, 37453), 'pandas.Series', 'Series', (['[1, 1, 2]'], {'dtype': '"""category"""'}), "([1, 1, 2], dtype='category')\n", (37424, 37453), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((39917, 39964), 'pandas.CategoricalIndex', 'CategoricalIndex', (["['bar', 'foo']"], {'ordered': '(False)'}), "(['bar', 'foo'], ordered=False)\n", (39933, 39964), False, 'from 
pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((39979, 40035), 'pandas.CategoricalIndex', 'CategoricalIndex', (["['one', 'three', 'two']"], {'ordered': '(False)'}), "(['one', 'three', 'two'], ordered=False)\n", (39995, 40035), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((41169, 41216), 'pandas.CategoricalIndex', 'CategoricalIndex', (["['bar', 'foo']"], {'ordered': '(False)'}), "(['bar', 'foo'], ordered=False)\n", (41185, 41216), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((41239, 41295), 'pandas.CategoricalIndex', 'CategoricalIndex', (["['one', 'three', 'two']"], {'ordered': '(False)'}), "(['one', 'three', 'two'], ordered=False)\n", (41255, 41295), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((41318, 41339), 'pandas.Index', 'Index', (["['min', 'max']"], {}), "(['min', 'max'])\n", (41323, 41339), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((41609, 41656), 'pandas.CategoricalIndex', 'CategoricalIndex', (["['bar', 'foo']"], {'ordered': '(False)'}), "(['bar', 'foo'], ordered=False)\n", (41625, 41656), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((41679, 41735), 'pandas.CategoricalIndex', 'CategoricalIndex', (["['one', 'three', 'two']"], {'ordered': '(False)'}), "(['one', 'three', 'two'], ordered=False)\n", (41695, 41735), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((41758, 41779), 'pandas.Index', 'Index', (["['min', 'max']"], {}), "(['min', 'max'])\n", (41763, 41779), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((49302, 49330), 'pandas.Categorical', 'Categorical', (['[0, 0, 0, 0, 1]'], {}), 
'([0, 0, 0, 0, 1])\n', (49313, 49330), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((50728, 50739), 'pandas.Index', 'Index', (['cats'], {}), '(cats)\n', (50733, 50739), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((7184, 7212), 'pandas.CategoricalIndex', 'CategoricalIndex', (["['a', 'b']"], {}), "(['a', 'b'])\n", (7200, 7212), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((7566, 7594), 'pandas.CategoricalIndex', 'CategoricalIndex', (["['a', 'b']"], {}), "(['a', 'b'])\n", (7582, 7594), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((36428, 36480), 'pandas.date_range', 'pd.date_range', (['"""2018-06-01 00"""'], {'freq': '"""1T"""', 'periods': '(3)'}), "('2018-06-01 00', freq='1T', periods=3)\n", (36441, 36480), True, 'import pandas as pd\n'), ((50265, 50296), 'pandas.Series', 'Series', (["['b']"], {'dtype': '"""category"""'}), "(['b'], dtype='category')\n", (50271, 50296), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n'), ((37144, 37179), 'pandas.Series', 'Series', (['[1, 1, 2]'], {'dtype': '"""category"""'}), "([1, 1, 2], dtype='category')\n", (37150, 37179), False, 'from pandas import Categorical, CategoricalIndex, DataFrame, Index, MultiIndex, Series, qcut\n')] |
"""
compute partial correlation
"""
import numpy
def pcor_from_precision(P, zero_diagonal=1):
    """Compute the partial correlation matrix from a precision matrix.

    Based on the formula from
    http://en.wikipedia.org/wiki/Partial_correlation#Using_matrix_inversion
    which defines, for i != j:

        pcor[i, j] = -P[i, j] / sqrt(P[i, i] * P[j, j])

    Parameters
    ----------
    P : (n, n) array_like
        Precision matrix (inverse covariance). Its diagonal entries are
        assumed to be positive, as for any valid precision matrix.
    zero_diagonal : int, optional
        If 1 (the default) the diagonal of the result is set to 0;
        otherwise it is set to 1 (each variable's self-correlation).

    Returns
    -------
    numpy.ndarray, shape (n, n)
        The partial correlation matrix.
    """
    P = numpy.asarray(P, dtype=float)
    d = numpy.sqrt(numpy.diag(P))
    # Bug fix: the cited reference formula carries a minus sign on the
    # off-diagonal entries, which the original element-wise loop omitted.
    # numpy.outer(d, d)[i, j] == sqrt(P[i, i]) * sqrt(P[j, j]).
    pcor = -P / numpy.outer(d, d)
    numpy.fill_diagonal(pcor, 0.0 if zero_diagonal == 1 else 1.0)
    return pcor
| [
"numpy.zeros",
"numpy.sqrt"
] | [((280, 300), 'numpy.zeros', 'numpy.zeros', (['P.shape'], {}), '(P.shape)\n', (291, 300), False, 'import numpy\n'), ((398, 427), 'numpy.sqrt', 'numpy.sqrt', (['(P[i, i] * P[j, j])'], {}), '(P[i, i] * P[j, j])\n', (408, 427), False, 'import numpy\n')] |
# Copyright (c) OpenMMLab. All rights reserved.
import random
from tempfile import TemporaryDirectory
import numpy as np
import pytest
import torch
from scipy import stats
from torch import nn
from mmcv.cnn import (Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit,
PretrainedInit, TruncNormalInit, UniformInit, XavierInit,
bias_init_with_prob, caffe2_xavier_init, constant_init,
initialize, kaiming_init, normal_init, trunc_normal_init,
uniform_init, xavier_init)
def test_constant_init():
    """constant_init fills weights with ``val`` and biases with the default 0."""
    conv = nn.Conv2d(3, 16, 3)
    constant_init(conv, 0.1)
    expected_weight = torch.full_like(conv.weight, 0.1)
    assert conv.weight.allclose(expected_weight)
    assert conv.bias.allclose(torch.zeros_like(conv.bias))
    # A module without a bias must be handled without error.
    conv_bias_free = nn.Conv2d(3, 16, 3, bias=False)
    constant_init(conv_bias_free, 0.1)
    assert conv.weight.allclose(torch.full_like(conv.weight, 0.1))
def test_xavier_init():
    """xavier_init sets the bias and rejects unsupported distributions."""
    conv = nn.Conv2d(3, 16, 3)
    xavier_init(conv, bias=0.1)
    assert conv.bias.allclose(torch.full_like(conv.bias, 0.1))
    xavier_init(conv, distribution='uniform')
    # TODO: sanity check of weight distribution, e.g. mean, std
    # Unknown distribution names must be rejected up front.
    with pytest.raises(AssertionError):
        xavier_init(conv, distribution='student-t')
    bias_free = nn.Conv2d(3, 16, 3, bias=False)
    xavier_init(bias_free)
def test_normal_init():
    """normal_init draws weights from N(mean, std) and fills the bias."""
    conv = nn.Conv2d(3, 16, 3)
    normal_init(conv, bias=0.1)
    # TODO: sanity check of weight distribution, e.g. mean, std
    assert conv.bias.allclose(torch.full_like(conv.bias, 0.1))
    bias_free = nn.Conv2d(3, 16, 3, bias=False)
    normal_init(bias_free)
    # TODO: sanity check distribution, e.g. mean, std
def test_trunc_normal_init():
    """Weights follow a truncated normal; the bias is filled with ``bias``."""

    def _uniform(lo, hi):
        # Single draw from U(lo, hi).
        return random.uniform(lo, hi)

    def _looks_trunc_normal(tensor, mean, std, lo, hi):
        # scipy's truncnorm describes data drawn from N(0, 1), so the
        # samples are standardised before the Kolmogorov-Smirnov test.
        z_samples = ((tensor.view(-1) - mean) / std).tolist()
        bounds = ((lo - mean) / std, (hi - mean) / std)
        p_value = stats.kstest(z_samples, 'truncnorm', args=bounds)[1]
        return p_value > 0.0001

    conv = nn.Conv2d(3, 16, 3)
    mean = _uniform(-3, 3)
    std = _uniform(.01, 1)
    lo = _uniform(mean - 2 * std, mean)
    hi = _uniform(mean, mean + 2 * std)
    trunc_normal_init(conv, mean, std, lo, hi, bias=0.1)
    assert _looks_trunc_normal(conv.weight, mean, std, lo, hi)
    assert conv.bias.allclose(torch.full_like(conv.bias, 0.1))
    bias_free = nn.Conv2d(3, 16, 3, bias=False)
    trunc_normal_init(bias_free)
    # TODO: sanity check distribution, e.g. mean, std
def test_uniform_init():
    """uniform_init draws weights from U(a, b) and fills the bias."""
    conv = nn.Conv2d(3, 16, 3)
    uniform_init(conv, bias=0.1)
    # TODO: sanity check of weight distribution, e.g. mean, std
    assert conv.bias.allclose(torch.full_like(conv.bias, 0.1))
    bias_free = nn.Conv2d(3, 16, 3, bias=False)
    uniform_init(bias_free)
def test_kaiming_init():
    """kaiming_init sets the bias and rejects unsupported distributions."""
    conv = nn.Conv2d(3, 16, 3)
    kaiming_init(conv, bias=0.1)
    # TODO: sanity check of weight distribution, e.g. mean, std
    assert conv.bias.allclose(torch.full_like(conv.bias, 0.1))
    kaiming_init(conv, distribution='uniform')
    # Unknown distribution names must be rejected up front.
    with pytest.raises(AssertionError):
        kaiming_init(conv, distribution='student-t')
    bias_free = nn.Conv2d(3, 16, 3, bias=False)
    kaiming_init(bias_free)
def test_caffe_xavier_init():
    """caffe2_xavier_init runs on a plain conv module without error."""
    caffe2_xavier_init(nn.Conv2d(3, 16, 3))
def test_bias_init_with_prob():
    """bias_init_with_prob(p) must evaluate to -log((1 - p) / p)."""
    prior_prob = 0.1
    conv = nn.Conv2d(3, 16, 3)
    normal_init(conv, bias=bias_init_with_prob(0.1))
    # TODO: sanity check of weight distribution, e.g. mean, std
    # Closed-form value the helper is expected to produce.
    expected_bias = float(-np.log((1 - prior_prob) / prior_prob))
    assert conv.bias.allclose(torch.full_like(conv.bias, expected_bias))
def test_constaninit():
    """test ConstantInit class.

    Covers: the ``layer`` filter, ``bias`` vs ``bias_prob``, the ``_ConvNd``
    base-class layer key, and argument type validation.
    """
    # NOTE(review): the name is presumably a typo for ``test_constantinit``;
    # kept as-is since pytest discovers it via the ``test_`` prefix.
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    func = ConstantInit(val=1, bias=2, layer='Conv2d')
    func(model)
    # Only the Conv2d (model[0]) is filled; the Linear (model[2]) keeps its
    # default (random) parameters.
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.))
    assert not torch.equal(model[2].weight,
                           torch.full(model[2].weight.shape, 1.))
    assert not torch.equal(model[2].bias, torch.full(model[2].bias.shape, 2.))
    # Re-initialise only the Linear; the Conv2d values from above survive.
    func = ConstantInit(val=3, bias_prob=0.01, layer='Linear')
    func(model)
    res = bias_init_with_prob(0.01)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 3.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, res))
    # test layer key with base class name
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = ConstantInit(val=4., bias=5., layer='_ConvNd')
    func(model)
    assert torch.all(model[0].weight == 4.)
    assert torch.all(model[2].weight == 4.)
    assert torch.all(model[0].bias == 5.)
    assert torch.all(model[2].bias == 5.)
    # test bias input type
    with pytest.raises(TypeError):
        func = ConstantInit(val=1, bias='1')
    # test bias_prob type
    with pytest.raises(TypeError):
        func = ConstantInit(val=1, bias_prob='1')
    # test layer input type
    with pytest.raises(TypeError):
        func = ConstantInit(val=1, layer=1)
def test_xavierinit():
    """test XavierInit class.

    Covers: the ``layer`` filter, ``bias`` vs ``bias_prob``, the ``_ConvNd``
    base-class layer key, and argument type validation.
    """
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    func = XavierInit(bias=0.1, layer='Conv2d')
    func(model)
    # Only the Conv2d bias should be set to 0.1.
    # NOTE(review): the ``full_like`` templates use the *other* module's bias
    # (model[2] vs model[0]); the comparison still works because ``allclose``
    # broadcasts the (1,)- and (2,)-shaped biases, but it looks like a typo —
    # confirm intent.
    assert model[0].bias.allclose(torch.full_like(model[2].bias, 0.1))
    assert not model[2].bias.allclose(torch.full_like(model[0].bias, 0.1))
    # Zero everything, then check that XavierInit actually changes weights
    # and sets biases via ``bias_prob``.
    constant_func = ConstantInit(val=0, bias=0, layer=['Conv2d', 'Linear'])
    func = XavierInit(gain=100, bias_prob=0.01, layer=['Conv2d', 'Linear'])
    model.apply(constant_func)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.))
    res = bias_init_with_prob(0.01)
    func(model)
    assert not torch.equal(model[0].weight,
                           torch.full(model[0].weight.shape, 0.))
    assert not torch.equal(model[2].weight,
                           torch.full(model[2].weight.shape, 0.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, res))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, res))
    # test layer key with base class name
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = ConstantInit(val=4., bias=5., layer='_ConvNd')
    func(model)
    assert torch.all(model[0].weight == 4.)
    assert torch.all(model[2].weight == 4.)
    assert torch.all(model[0].bias == 5.)
    assert torch.all(model[2].bias == 5.)
    func = XavierInit(gain=100, bias_prob=0.01, layer='_ConvNd')
    func(model)
    assert not torch.all(model[0].weight == 4.)
    assert not torch.all(model[2].weight == 4.)
    assert torch.all(model[0].bias == res)
    assert torch.all(model[2].bias == res)
    # test bias input type
    with pytest.raises(TypeError):
        func = XavierInit(bias='0.1', layer='Conv2d')
    # test layer input type
    with pytest.raises(TypeError):
        func = XavierInit(bias=0.1, layer=1)
def test_normalinit():
    """test Normalinit class.

    With ``std=1e-5`` every sampled weight lies within ``allclose``
    tolerance of ``mean``, so the mean can be checked directly.
    """
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    func = NormalInit(mean=100, std=1e-5, bias=200, layer=['Conv2d', 'Linear'])
    func(model)
    assert model[0].weight.allclose(torch.tensor(100.))
    assert model[2].weight.allclose(torch.tensor(100.))
    assert model[0].bias.allclose(torch.tensor(200.))
    assert model[2].bias.allclose(torch.tensor(200.))
    # ``bias_prob`` takes precedence: biases become bias_init_with_prob(0.01).
    func = NormalInit(
        mean=300, std=1e-5, bias_prob=0.01, layer=['Conv2d', 'Linear'])
    res = bias_init_with_prob(0.01)
    func(model)
    assert model[0].weight.allclose(torch.tensor(300.))
    assert model[2].weight.allclose(torch.tensor(300.))
    assert model[0].bias.allclose(torch.tensor(res))
    assert model[2].bias.allclose(torch.tensor(res))
    # test layer key with base class name
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = NormalInit(mean=300, std=1e-5, bias_prob=0.01, layer='_ConvNd')
    func(model)
    assert model[0].weight.allclose(torch.tensor(300.))
    assert model[2].weight.allclose(torch.tensor(300.))
    assert torch.all(model[0].bias == res)
    assert torch.all(model[2].bias == res)
def test_truncnormalinit():
    """test TruncNormalInit class.

    With ``std=1e-5`` and truncation bounds containing ``mean``, every
    sampled weight lies within ``allclose`` tolerance of ``mean``.
    """
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    func = TruncNormalInit(
        mean=100, std=1e-5, bias=200, a=0, b=200, layer=['Conv2d', 'Linear'])
    func(model)
    assert model[0].weight.allclose(torch.tensor(100.))
    assert model[2].weight.allclose(torch.tensor(100.))
    assert model[0].bias.allclose(torch.tensor(200.))
    assert model[2].bias.allclose(torch.tensor(200.))
    # ``bias_prob`` takes precedence: biases become bias_init_with_prob(0.01).
    func = TruncNormalInit(
        mean=300,
        std=1e-5,
        a=100,
        b=400,
        bias_prob=0.01,
        layer=['Conv2d', 'Linear'])
    res = bias_init_with_prob(0.01)
    func(model)
    assert model[0].weight.allclose(torch.tensor(300.))
    assert model[2].weight.allclose(torch.tensor(300.))
    assert model[0].bias.allclose(torch.tensor(res))
    assert model[2].bias.allclose(torch.tensor(res))
    # test layer key with base class name
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = TruncNormalInit(
        mean=300, std=1e-5, a=100, b=400, bias_prob=0.01, layer='_ConvNd')
    func(model)
    assert model[0].weight.allclose(torch.tensor(300.))
    assert model[2].weight.allclose(torch.tensor(300.))
    assert torch.all(model[0].bias == res)
    assert torch.all(model[2].bias == res)
def test_uniforminit():
    """test UniformInit class.

    Degenerate intervals (``a == b``) are used so the sampled weights are
    exactly known and can be compared with ``torch.equal``.
    """
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    func = UniformInit(a=1, b=1, bias=2, layer=['Conv2d', 'Linear'])
    func(model)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 1.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 2.))
    func = UniformInit(a=100, b=100, layer=['Conv2d', 'Linear'], bias=10)
    func(model)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape,
                                                   100.))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape,
                                                   100.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 10.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 10.))
    # test layer key with base class name
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = UniformInit(a=100, b=100, bias_prob=0.01, layer='_ConvNd')
    res = bias_init_with_prob(0.01)
    func(model)
    assert torch.all(model[0].weight == 100.)
    assert torch.all(model[2].weight == 100.)
    assert torch.all(model[0].bias == res)
    assert torch.all(model[2].bias == res)
def test_kaiminginit():
    """test KaimingInit class.

    Covers: the ``layer`` filter, the ``a`` (negative-slope) argument, and
    the ``_ConvNd`` base-class layer key.
    """
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    func = KaimingInit(bias=0.1, layer='Conv2d')
    func(model)
    # Only the Conv2d bias is set; the Linear keeps its default bias.
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.1))
    assert not torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.1))
    # Zero everything, then verify the initializer changes weights and biases.
    func = KaimingInit(a=100, bias=10, layer=['Conv2d', 'Linear'])
    constant_func = ConstantInit(val=0, bias=0, layer=['Conv2d', 'Linear'])
    model.apply(constant_func)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.))
    func(model)
    assert not torch.equal(model[0].weight,
                           torch.full(model[0].weight.shape, 0.))
    assert not torch.equal(model[2].weight,
                           torch.full(model[2].weight.shape, 0.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 10.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 10.))
    # test layer key with base class name
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
    func = KaimingInit(bias=0.1, layer='_ConvNd')
    func(model)
    assert torch.all(model[0].bias == 0.1)
    assert torch.all(model[2].bias == 0.1)
    func = KaimingInit(a=100, bias=10, layer='_ConvNd')
    constant_func = ConstantInit(val=0, bias=0, layer='_ConvNd')
    model.apply(constant_func)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.))
    func(model)
    assert not torch.equal(model[0].weight,
                           torch.full(model[0].weight.shape, 0.))
    assert not torch.equal(model[2].weight,
                           torch.full(model[2].weight.shape, 0.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 10.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 10.))
def test_caffe2xavierinit():
    """Caffe2XavierInit only touches layers matching the ``layer`` key."""
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    Caffe2XavierInit(bias=0.1, layer='Conv2d')(model)
    # The Conv2d bias is set; the Linear bias stays untouched.
    expected = torch.full(model[0].bias.shape, 0.1)
    assert torch.equal(model[0].bias, expected)
    assert not torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.1))
class FooModule(nn.Module):
    # Small fixture network for the Pretrained/override tests below.
    # The attribute names are significant: ``override=dict(name='conv2d_2')``
    # and the checkpoint prefix ``'linear.'`` refer to them by name.
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(1, 2)
        self.conv2d = nn.Conv2d(3, 1, 3)
        self.conv2d_2 = nn.Conv2d(3, 2, 3)
def test_pretrainedinit():
    """test PretrainedInit class.

    ``PretrainedInit`` loads parameters from a saved checkpoint into a
    freshly built model, optionally stripping a key ``prefix``.
    """
    import os  # local import keeps the fix self-contained
    # Source model whose state becomes the checkpoint: every Conv2d/Linear
    # weight is 1 and every bias is 2.
    modelA = FooModule()
    constant_func = ConstantInit(val=1, bias=2, layer=['Conv2d', 'Linear'])
    modelA.apply(constant_func)
    modelB = FooModule()
    modelC = nn.Linear(1, 2)
    with TemporaryDirectory() as tmpdir:
        # Bug fix: the checkpoint used to be written to the *current working
        # directory* (and leaked there after the test), which made the
        # TemporaryDirectory pointless. Save it inside ``tmpdir`` instead.
        checkpoint = os.path.join(tmpdir, 'modelA.pth')
        torch.save(modelA.state_dict(), checkpoint)
        funcB = PretrainedInit(checkpoint=checkpoint)
        funcC = PretrainedInit(checkpoint=checkpoint, prefix='linear.')
        funcB(modelB)
        # modelB must now mirror modelA exactly.
        assert torch.equal(modelB.linear.weight,
                           torch.full(modelB.linear.weight.shape, 1.))
        assert torch.equal(modelB.linear.bias,
                           torch.full(modelB.linear.bias.shape, 2.))
        assert torch.equal(modelB.conv2d.weight,
                           torch.full(modelB.conv2d.weight.shape, 1.))
        assert torch.equal(modelB.conv2d.bias,
                           torch.full(modelB.conv2d.bias.shape, 2.))
        assert torch.equal(modelB.conv2d_2.weight,
                           torch.full(modelB.conv2d_2.weight.shape, 1.))
        assert torch.equal(modelB.conv2d_2.bias,
                           torch.full(modelB.conv2d_2.bias.shape, 2.))
        funcC(modelC)
        # With prefix='linear.' only that sub-module's parameters are loaded
        # into the bare nn.Linear.
        assert torch.equal(modelC.weight, torch.full(modelC.weight.shape, 1.))
        assert torch.equal(modelC.bias, torch.full(modelC.bias.shape, 2.))
def test_initialize():
    """End-to-end test of ``initialize``.

    Covers: a single dict config, a list of configs, the ``override`` key
    (with and without inheriting ``type``), a ``Pretrained`` config combined
    with an override, and validation of malformed configs. Each step also
    asserts that ``initialize`` does not mutate the passed-in ``init_cfg``.
    """
    model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
    foonet = FooModule()
    # test layer key
    init_cfg = dict(type='Constant', layer=['Conv2d', 'Linear'], val=1, bias=2)
    initialize(model, init_cfg)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 1.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 2.))
    assert init_cfg == dict(
        type='Constant', layer=['Conv2d', 'Linear'], val=1, bias=2)
    # test init_cfg with list type
    init_cfg = [
        dict(type='Constant', layer='Conv2d', val=1, bias=2),
        dict(type='Constant', layer='Linear', val=3, bias=4)
    ]
    initialize(model, init_cfg)
    assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.))
    assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 3.))
    assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.))
    assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 4.))
    assert init_cfg == [
        dict(type='Constant', layer='Conv2d', val=1, bias=2),
        dict(type='Constant', layer='Linear', val=3, bias=4)
    ]
    # test layer key and override key
    init_cfg = dict(
        type='Constant',
        val=1,
        bias=2,
        layer=['Conv2d', 'Linear'],
        override=dict(type='Constant', name='conv2d_2', val=3, bias=4))
    initialize(foonet, init_cfg)
    assert torch.equal(foonet.linear.weight,
                       torch.full(foonet.linear.weight.shape, 1.))
    assert torch.equal(foonet.linear.bias,
                       torch.full(foonet.linear.bias.shape, 2.))
    assert torch.equal(foonet.conv2d.weight,
                       torch.full(foonet.conv2d.weight.shape, 1.))
    assert torch.equal(foonet.conv2d.bias,
                       torch.full(foonet.conv2d.bias.shape, 2.))
    # The override wins for the named sub-module only.
    assert torch.equal(foonet.conv2d_2.weight,
                       torch.full(foonet.conv2d_2.weight.shape, 3.))
    assert torch.equal(foonet.conv2d_2.bias,
                       torch.full(foonet.conv2d_2.bias.shape, 4.))
    assert init_cfg == dict(
        type='Constant',
        val=1,
        bias=2,
        layer=['Conv2d', 'Linear'],
        override=dict(type='Constant', name='conv2d_2', val=3, bias=4))
    # test override key
    init_cfg = dict(
        type='Constant', val=5, bias=6, override=dict(name='conv2d_2'))
    initialize(foonet, init_cfg)
    # Without a ``layer`` key only the override target is touched; the
    # override inherits type/val/bias from the outer config.
    assert not torch.equal(foonet.linear.weight,
                           torch.full(foonet.linear.weight.shape, 5.))
    assert not torch.equal(foonet.linear.bias,
                           torch.full(foonet.linear.bias.shape, 6.))
    assert not torch.equal(foonet.conv2d.weight,
                           torch.full(foonet.conv2d.weight.shape, 5.))
    assert not torch.equal(foonet.conv2d.bias,
                           torch.full(foonet.conv2d.bias.shape, 6.))
    assert torch.equal(foonet.conv2d_2.weight,
                       torch.full(foonet.conv2d_2.weight.shape, 5.))
    assert torch.equal(foonet.conv2d_2.bias,
                       torch.full(foonet.conv2d_2.bias.shape, 6.))
    assert init_cfg == dict(
        type='Constant', val=5, bias=6, override=dict(name='conv2d_2'))
    # test Pretrained type combined with an override
    init_cfg = dict(
        type='Pretrained',
        checkpoint='modelA.pth',
        override=dict(type='Constant', name='conv2d_2', val=3, bias=4))
    modelA = FooModule()
    constant_func = ConstantInit(val=1, bias=2, layer=['Conv2d', 'Linear'])
    modelA.apply(constant_func)
    # NOTE(review): the checkpoint is saved to the *current working
    # directory*, not inside the TemporaryDirectory, so 'modelA.pth' leaks
    # into the cwd and the context manager is ineffective — worth fixing.
    with TemporaryDirectory():
        torch.save(modelA.state_dict(), 'modelA.pth')
        initialize(foonet, init_cfg)
    assert torch.equal(foonet.linear.weight,
                       torch.full(foonet.linear.weight.shape, 1.))
    assert torch.equal(foonet.linear.bias,
                       torch.full(foonet.linear.bias.shape, 2.))
    assert torch.equal(foonet.conv2d.weight,
                       torch.full(foonet.conv2d.weight.shape, 1.))
    assert torch.equal(foonet.conv2d.bias,
                       torch.full(foonet.conv2d.bias.shape, 2.))
    assert torch.equal(foonet.conv2d_2.weight,
                       torch.full(foonet.conv2d_2.weight.shape, 3.))
    assert torch.equal(foonet.conv2d_2.bias,
                       torch.full(foonet.conv2d_2.bias.shape, 4.))
    assert init_cfg == dict(
        type='Pretrained',
        checkpoint='modelA.pth',
        override=dict(type='Constant', name='conv2d_2', val=3, bias=4))
    # test init_cfg type
    with pytest.raises(TypeError):
        init_cfg = 'init_cfg'
        initialize(foonet, init_cfg)
    # test override value type
    with pytest.raises(TypeError):
        init_cfg = dict(
            type='Constant',
            val=1,
            bias=2,
            layer=['Conv2d', 'Linear'],
            override='conv')
        initialize(foonet, init_cfg)
    # test override name
    with pytest.raises(RuntimeError):
        init_cfg = dict(
            type='Constant',
            val=1,
            bias=2,
            layer=['Conv2d', 'Linear'],
            override=dict(type='Constant', name='conv2d_3', val=3, bias=4))
        initialize(foonet, init_cfg)
    # test list override name
    with pytest.raises(RuntimeError):
        init_cfg = dict(
            type='Constant',
            val=1,
            bias=2,
            layer=['Conv2d', 'Linear'],
            override=[
                dict(type='Constant', name='conv2d', val=3, bias=4),
                dict(type='Constant', name='conv2d_3', val=5, bias=6)
            ])
        initialize(foonet, init_cfg)
    # test override with args except type key
    with pytest.raises(ValueError):
        init_cfg = dict(
            type='Constant',
            val=1,
            bias=2,
            override=dict(name='conv2d_2', val=3, bias=4))
        initialize(foonet, init_cfg)
    # test override without name
    with pytest.raises(ValueError):
        init_cfg = dict(
            type='Constant',
            val=1,
            bias=2,
            override=dict(type='Constant', val=3, bias=4))
        initialize(foonet, init_cfg)
| [
"torch.full",
"mmcv.cnn.kaiming_init",
"mmcv.cnn.Caffe2XavierInit",
"mmcv.cnn.ConstantInit",
"mmcv.cnn.initialize",
"tempfile.TemporaryDirectory",
"mmcv.cnn.caffe2_xavier_init",
"torch.nn.Conv1d",
"mmcv.cnn.XavierInit",
"pytest.raises",
"mmcv.cnn.normal_init",
"torch.nn.Linear",
"mmcv.cnn.Ka... | [((607, 626), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(16)', '(3)'], {}), '(3, 16, 3)\n', (616, 626), False, 'from torch import nn\n'), ((631, 662), 'mmcv.cnn.constant_init', 'constant_init', (['conv_module', '(0.1)'], {}), '(conv_module, 0.1)\n', (644, 662), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((852, 883), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(16)', '(3)'], {'bias': '(False)'}), '(3, 16, 3, bias=False)\n', (861, 883), False, 'from torch import nn\n'), ((888, 927), 'mmcv.cnn.constant_init', 'constant_init', (['conv_module_no_bias', '(0.1)'], {}), '(conv_module_no_bias, 0.1)\n', (901, 927), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((1062, 1081), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(16)', '(3)'], {}), '(3, 16, 3)\n', (1071, 1081), False, 'from torch import nn\n'), ((1086, 1120), 'mmcv.cnn.xavier_init', 'xavier_init', (['conv_module'], {'bias': '(0.1)'}), '(conv_module, bias=0.1)\n', (1097, 1120), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((1202, 1250), 'mmcv.cnn.xavier_init', 'xavier_init', (['conv_module'], {'distribution': '"""uniform"""'}), "(conv_module, distribution='uniform')\n", (1213, 1250), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, 
TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((1440, 1471), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(16)', '(3)'], {'bias': '(False)'}), '(3, 16, 3, bias=False)\n', (1449, 1471), False, 'from torch import nn\n'), ((1476, 1508), 'mmcv.cnn.xavier_init', 'xavier_init', (['conv_module_no_bias'], {}), '(conv_module_no_bias)\n', (1487, 1508), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((1553, 1572), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(16)', '(3)'], {}), '(3, 16, 3)\n', (1562, 1572), False, 'from torch import nn\n'), ((1577, 1611), 'mmcv.cnn.normal_init', 'normal_init', (['conv_module'], {'bias': '(0.1)'}), '(conv_module, bias=0.1)\n', (1588, 1611), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((1779, 1810), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(16)', '(3)'], {'bias': '(False)'}), '(3, 16, 3, bias=False)\n', (1788, 1810), False, 'from torch import nn\n'), ((1815, 1847), 'mmcv.cnn.normal_init', 'normal_init', (['conv_module_no_bias'], {}), '(conv_module_no_bias)\n', (1826, 1847), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((2470, 2489), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(16)', '(3)'], {}), 
'(3, 16, 3)\n', (2479, 2489), False, 'from torch import nn\n'), ((2646, 2703), 'mmcv.cnn.trunc_normal_init', 'trunc_normal_init', (['conv_module', 'mean', 'std', 'a', 'b'], {'bias': '(0.1)'}), '(conv_module, mean, std, a, b, bias=0.1)\n', (2663, 2703), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((2873, 2904), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(16)', '(3)'], {'bias': '(False)'}), '(3, 16, 3, bias=False)\n', (2882, 2904), False, 'from torch import nn\n'), ((2909, 2947), 'mmcv.cnn.trunc_normal_init', 'trunc_normal_init', (['conv_module_no_bias'], {}), '(conv_module_no_bias)\n', (2926, 2947), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((3047, 3066), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(16)', '(3)'], {}), '(3, 16, 3)\n', (3056, 3066), False, 'from torch import nn\n'), ((3071, 3106), 'mmcv.cnn.uniform_init', 'uniform_init', (['conv_module'], {'bias': '(0.1)'}), '(conv_module, bias=0.1)\n', (3083, 3106), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((3274, 3305), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(16)', '(3)'], {'bias': '(False)'}), '(3, 16, 3, bias=False)\n', (3283, 3305), False, 'from torch import nn\n'), ((3310, 3343), 'mmcv.cnn.uniform_init', 'uniform_init', (['conv_module_no_bias'], {}), '(conv_module_no_bias)\n', (3322, 
3343), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((3389, 3408), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(16)', '(3)'], {}), '(3, 16, 3)\n', (3398, 3408), False, 'from torch import nn\n'), ((3413, 3448), 'mmcv.cnn.kaiming_init', 'kaiming_init', (['conv_module'], {'bias': '(0.1)'}), '(conv_module, bias=0.1)\n', (3425, 3448), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((3594, 3643), 'mmcv.cnn.kaiming_init', 'kaiming_init', (['conv_module'], {'distribution': '"""uniform"""'}), "(conv_module, distribution='uniform')\n", (3606, 3643), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((3770, 3801), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(16)', '(3)'], {'bias': '(False)'}), '(3, 16, 3, bias=False)\n', (3779, 3801), False, 'from torch import nn\n'), ((3806, 3839), 'mmcv.cnn.kaiming_init', 'kaiming_init', (['conv_module_no_bias'], {}), '(conv_module_no_bias)\n', (3818, 3839), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((3890, 3909), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(16)', '(3)'], {}), '(3, 16, 3)\n', 
(3899, 3909), False, 'from torch import nn\n'), ((3914, 3945), 'mmcv.cnn.caffe2_xavier_init', 'caffe2_xavier_init', (['conv_module'], {}), '(conv_module)\n', (3932, 3945), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((3998, 4017), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(16)', '(3)'], {}), '(3, 16, 3)\n', (4007, 4017), False, 'from torch import nn\n'), ((4444, 4487), 'mmcv.cnn.ConstantInit', 'ConstantInit', ([], {'val': '(1)', 'bias': '(2)', 'layer': '"""Conv2d"""'}), "(val=1, bias=2, layer='Conv2d')\n", (4456, 4487), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((4860, 4911), 'mmcv.cnn.ConstantInit', 'ConstantInit', ([], {'val': '(3)', 'bias_prob': '(0.01)', 'layer': '"""Linear"""'}), "(val=3, bias_prob=0.01, layer='Linear')\n", (4872, 4911), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((4938, 4963), 'mmcv.cnn.bias_init_with_prob', 'bias_init_with_prob', (['(0.01)'], {}), '(0.01)\n', (4957, 4963), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((5405, 5453), 'mmcv.cnn.ConstantInit', 'ConstantInit', ([], 
{'val': '(4.0)', 'bias': '(5.0)', 'layer': '"""_ConvNd"""'}), "(val=4.0, bias=5.0, layer='_ConvNd')\n", (5417, 5453), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((5479, 5512), 'torch.all', 'torch.all', (['(model[0].weight == 4.0)'], {}), '(model[0].weight == 4.0)\n', (5488, 5512), False, 'import torch\n'), ((5523, 5556), 'torch.all', 'torch.all', (['(model[2].weight == 4.0)'], {}), '(model[2].weight == 4.0)\n', (5532, 5556), False, 'import torch\n'), ((5567, 5598), 'torch.all', 'torch.all', (['(model[0].bias == 5.0)'], {}), '(model[0].bias == 5.0)\n', (5576, 5598), False, 'import torch\n'), ((5609, 5640), 'torch.all', 'torch.all', (['(model[2].bias == 5.0)'], {}), '(model[2].bias == 5.0)\n', (5618, 5640), False, 'import torch\n'), ((6109, 6145), 'mmcv.cnn.XavierInit', 'XavierInit', ([], {'bias': '(0.1)', 'layer': '"""Conv2d"""'}), "(bias=0.1, layer='Conv2d')\n", (6119, 6145), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((6329, 6384), 'mmcv.cnn.ConstantInit', 'ConstantInit', ([], {'val': '(0)', 'bias': '(0)', 'layer': "['Conv2d', 'Linear']"}), "(val=0, bias=0, layer=['Conv2d', 'Linear'])\n", (6341, 6384), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((6396, 6460), 'mmcv.cnn.XavierInit', 'XavierInit', ([], {'gain': '(100)', 'bias_prob': '(0.01)', 
'layer': "['Conv2d', 'Linear']"}), "(gain=100, bias_prob=0.01, layer=['Conv2d', 'Linear'])\n", (6406, 6460), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((6811, 6836), 'mmcv.cnn.bias_init_with_prob', 'bias_init_with_prob', (['(0.01)'], {}), '(0.01)\n', (6830, 6836), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((7356, 7404), 'mmcv.cnn.ConstantInit', 'ConstantInit', ([], {'val': '(4.0)', 'bias': '(5.0)', 'layer': '"""_ConvNd"""'}), "(val=4.0, bias=5.0, layer='_ConvNd')\n", (7368, 7404), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((7430, 7463), 'torch.all', 'torch.all', (['(model[0].weight == 4.0)'], {}), '(model[0].weight == 4.0)\n', (7439, 7463), False, 'import torch\n'), ((7474, 7507), 'torch.all', 'torch.all', (['(model[2].weight == 4.0)'], {}), '(model[2].weight == 4.0)\n', (7483, 7507), False, 'import torch\n'), ((7518, 7549), 'torch.all', 'torch.all', (['(model[0].bias == 5.0)'], {}), '(model[0].bias == 5.0)\n', (7527, 7549), False, 'import torch\n'), ((7560, 7591), 'torch.all', 'torch.all', (['(model[2].bias == 5.0)'], {}), '(model[2].bias == 5.0)\n', (7569, 7591), False, 'import torch\n'), ((7603, 7656), 'mmcv.cnn.XavierInit', 'XavierInit', ([], {'gain': '(100)', 'bias_prob': '(0.01)', 'layer': '"""_ConvNd"""'}), "(gain=100, 
bias_prob=0.01, layer='_ConvNd')\n", (7613, 7656), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((7780, 7811), 'torch.all', 'torch.all', (['(model[0].bias == res)'], {}), '(model[0].bias == res)\n', (7789, 7811), False, 'import torch\n'), ((7823, 7854), 'torch.all', 'torch.all', (['(model[2].bias == res)'], {}), '(model[2].bias == res)\n', (7832, 7854), False, 'import torch\n'), ((8224, 8293), 'mmcv.cnn.NormalInit', 'NormalInit', ([], {'mean': '(100)', 'std': '(1e-05)', 'bias': '(200)', 'layer': "['Conv2d', 'Linear']"}), "(mean=100, std=1e-05, bias=200, layer=['Conv2d', 'Linear'])\n", (8234, 8293), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((8541, 8616), 'mmcv.cnn.NormalInit', 'NormalInit', ([], {'mean': '(300)', 'std': '(1e-05)', 'bias_prob': '(0.01)', 'layer': "['Conv2d', 'Linear']"}), "(mean=300, std=1e-05, bias_prob=0.01, layer=['Conv2d', 'Linear'])\n", (8551, 8616), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((8635, 8660), 'mmcv.cnn.bias_init_with_prob', 'bias_init_with_prob', (['(0.01)'], {}), '(0.01)\n', (8654, 8660), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, 
kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((9027, 9091), 'mmcv.cnn.NormalInit', 'NormalInit', ([], {'mean': '(300)', 'std': '(1e-05)', 'bias_prob': '(0.01)', 'layer': '"""_ConvNd"""'}), "(mean=300, std=1e-05, bias_prob=0.01, layer='_ConvNd')\n", (9037, 9091), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((9230, 9261), 'torch.all', 'torch.all', (['(model[0].bias == res)'], {}), '(model[0].bias == res)\n', (9239, 9261), False, 'import torch\n'), ((9273, 9304), 'torch.all', 'torch.all', (['(model[2].bias == res)'], {}), '(model[2].bias == res)\n', (9282, 9304), False, 'import torch\n'), ((9459, 9549), 'mmcv.cnn.TruncNormalInit', 'TruncNormalInit', ([], {'mean': '(100)', 'std': '(1e-05)', 'bias': '(200)', 'a': '(0)', 'b': '(200)', 'layer': "['Conv2d', 'Linear']"}), "(mean=100, std=1e-05, bias=200, a=0, b=200, layer=['Conv2d',\n 'Linear'])\n", (9474, 9549), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((9802, 9901), 'mmcv.cnn.TruncNormalInit', 'TruncNormalInit', ([], {'mean': '(300)', 'std': '(1e-05)', 'a': '(100)', 'b': '(400)', 'bias_prob': '(0.01)', 'layer': "['Conv2d', 'Linear']"}), "(mean=300, std=1e-05, a=100, b=400, bias_prob=0.01, layer=[\n 'Conv2d', 'Linear'])\n", (9817, 9901), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, 
xavier_init\n'), ((9955, 9980), 'mmcv.cnn.bias_init_with_prob', 'bias_init_with_prob', (['(0.01)'], {}), '(0.01)\n', (9974, 9980), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((10347, 10435), 'mmcv.cnn.TruncNormalInit', 'TruncNormalInit', ([], {'mean': '(300)', 'std': '(1e-05)', 'a': '(100)', 'b': '(400)', 'bias_prob': '(0.01)', 'layer': '"""_ConvNd"""'}), "(mean=300, std=1e-05, a=100, b=400, bias_prob=0.01, layer=\n '_ConvNd')\n", (10362, 10435), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((10578, 10609), 'torch.all', 'torch.all', (['(model[0].bias == res)'], {}), '(model[0].bias == res)\n', (10587, 10609), False, 'import torch\n'), ((10621, 10652), 'torch.all', 'torch.all', (['(model[2].bias == res)'], {}), '(model[2].bias == res)\n', (10630, 10652), False, 'import torch\n'), ((10799, 10856), 'mmcv.cnn.UniformInit', 'UniformInit', ([], {'a': '(1)', 'b': '(1)', 'bias': '(2)', 'layer': "['Conv2d', 'Linear']"}), "(a=1, b=1, bias=2, layer=['Conv2d', 'Linear'])\n", (10810, 10856), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((11193, 11255), 'mmcv.cnn.UniformInit', 'UniformInit', ([], {'a': '(100)', 'b': '(100)', 'layer': "['Conv2d', 'Linear']", 'bias': '(10)'}), "(a=100, b=100, layer=['Conv2d', 'Linear'], bias=10)\n", (11204, 11255), False, 'from 
mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((11820, 11878), 'mmcv.cnn.UniformInit', 'UniformInit', ([], {'a': '(100)', 'b': '(100)', 'bias_prob': '(0.01)', 'layer': '"""_ConvNd"""'}), "(a=100, b=100, bias_prob=0.01, layer='_ConvNd')\n", (11831, 11878), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((11889, 11914), 'mmcv.cnn.bias_init_with_prob', 'bias_init_with_prob', (['(0.01)'], {}), '(0.01)\n', (11908, 11914), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((11942, 11977), 'torch.all', 'torch.all', (['(model[0].weight == 100.0)'], {}), '(model[0].weight == 100.0)\n', (11951, 11977), False, 'import torch\n'), ((11988, 12023), 'torch.all', 'torch.all', (['(model[2].weight == 100.0)'], {}), '(model[2].weight == 100.0)\n', (11997, 12023), False, 'import torch\n'), ((12034, 12065), 'torch.all', 'torch.all', (['(model[0].bias == res)'], {}), '(model[0].bias == res)\n', (12043, 12065), False, 'import torch\n'), ((12077, 12108), 'torch.all', 'torch.all', (['(model[2].bias == res)'], {}), '(model[2].bias == res)\n', (12086, 12108), False, 'import torch\n'), ((12254, 12291), 'mmcv.cnn.KaimingInit', 'KaimingInit', ([], {'bias': '(0.1)', 'layer': '"""Conv2d"""'}), "(bias=0.1, layer='Conv2d')\n", (12265, 12291), False, 'from mmcv.cnn import Caffe2XavierInit, 
ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((12476, 12531), 'mmcv.cnn.KaimingInit', 'KaimingInit', ([], {'a': '(100)', 'bias': '(10)', 'layer': "['Conv2d', 'Linear']"}), "(a=100, bias=10, layer=['Conv2d', 'Linear'])\n", (12487, 12531), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((12552, 12607), 'mmcv.cnn.ConstantInit', 'ConstantInit', ([], {'val': '(0)', 'bias': '(0)', 'layer': "['Conv2d', 'Linear']"}), "(val=0, bias=0, layer=['Conv2d', 'Linear'])\n", (12564, 12607), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((13467, 13505), 'mmcv.cnn.KaimingInit', 'KaimingInit', ([], {'bias': '(0.1)', 'layer': '"""_ConvNd"""'}), "(bias=0.1, layer='_ConvNd')\n", (13478, 13505), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((13533, 13564), 'torch.all', 'torch.all', (['(model[0].bias == 0.1)'], {}), '(model[0].bias == 0.1)\n', (13542, 13564), False, 'import torch\n'), ((13576, 13607), 'torch.all', 'torch.all', (['(model[2].bias == 0.1)'], {}), '(model[2].bias == 0.1)\n', (13585, 13607), False, 'import torch\n'), ((13620, 13664), 'mmcv.cnn.KaimingInit', 
'KaimingInit', ([], {'a': '(100)', 'bias': '(10)', 'layer': '"""_ConvNd"""'}), "(a=100, bias=10, layer='_ConvNd')\n", (13631, 13664), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((13685, 13729), 'mmcv.cnn.ConstantInit', 'ConstantInit', ([], {'val': '(0)', 'bias': '(0)', 'layer': '"""_ConvNd"""'}), "(val=0, bias=0, layer='_ConvNd')\n", (13697, 13729), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((14607, 14649), 'mmcv.cnn.Caffe2XavierInit', 'Caffe2XavierInit', ([], {'bias': '(0.1)', 'layer': '"""Conv2d"""'}), "(bias=0.1, layer='Conv2d')\n", (14623, 14649), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((15138, 15193), 'mmcv.cnn.ConstantInit', 'ConstantInit', ([], {'val': '(1)', 'bias': '(2)', 'layer': "['Conv2d', 'Linear']"}), "(val=1, bias=2, layer=['Conv2d', 'Linear'])\n", (15150, 15193), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((15263, 15302), 'mmcv.cnn.PretrainedInit', 'PretrainedInit', ([], {'checkpoint': '"""modelA.pth"""'}), "(checkpoint='modelA.pth')\n", (15277, 15302), False, 'from mmcv.cnn 
import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((15316, 15331), 'torch.nn.Linear', 'nn.Linear', (['(1)', '(2)'], {}), '(1, 2)\n', (15325, 15331), False, 'from torch import nn\n'), ((15344, 15401), 'mmcv.cnn.PretrainedInit', 'PretrainedInit', ([], {'checkpoint': '"""modelA.pth"""', 'prefix': '"""linear."""'}), "(checkpoint='modelA.pth', prefix='linear.')\n", (15358, 15401), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((16632, 16659), 'mmcv.cnn.initialize', 'initialize', (['model', 'init_cfg'], {}), '(model, init_cfg)\n', (16642, 16659), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((17251, 17278), 'mmcv.cnn.initialize', 'initialize', (['model', 'init_cfg'], {}), '(model, init_cfg)\n', (17261, 17278), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((17969, 17997), 'mmcv.cnn.initialize', 'initialize', (['foonet', 'init_cfg'], {}), '(foonet, init_cfg)\n', (17979, 17997), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, 
caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((18981, 19009), 'mmcv.cnn.initialize', 'initialize', (['foonet', 'init_cfg'], {}), '(foonet, init_cfg)\n', (18991, 19009), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((20010, 20065), 'mmcv.cnn.ConstantInit', 'ConstantInit', ([], {'val': '(1)', 'bias': '(2)', 'layer': "['Conv2d', 'Linear']"}), "(val=1, bias=2, layer=['Conv2d', 'Linear'])\n", (20022, 20065), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((711, 751), 'torch.full_like', 'torch.full_like', (['conv_module.weight', '(0.1)'], {}), '(conv_module.weight, 0.1)\n', (726, 751), False, 'import torch\n'), ((790, 824), 'torch.zeros_like', 'torch.zeros_like', (['conv_module.bias'], {}), '(conv_module.bias)\n', (806, 824), False, 'import torch\n'), ((976, 1016), 'torch.full_like', 'torch.full_like', (['conv_module.weight', '(0.1)'], {}), '(conv_module.weight, 0.1)\n', (991, 1016), False, 'import torch\n'), ((1158, 1196), 'torch.full_like', 'torch.full_like', (['conv_module.bias', '(0.1)'], {}), '(conv_module.bias, 0.1)\n', (1173, 1196), False, 'import torch\n'), ((1324, 1353), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (1337, 1353), False, 'import pytest\n'), ((1363, 1413), 'mmcv.cnn.xavier_init', 'xavier_init', (['conv_module'], {'distribution': '"""student-t"""'}), "(conv_module, distribution='student-t')\n", (1374, 1413), False, 'from mmcv.cnn import Caffe2XavierInit, 
ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((1713, 1751), 'torch.full_like', 'torch.full_like', (['conv_module.bias', '(0.1)'], {}), '(conv_module.bias, 0.1)\n', (1728, 1751), False, 'import torch\n'), ((2806, 2844), 'torch.full_like', 'torch.full_like', (['conv_module.bias', '(0.1)'], {}), '(conv_module.bias, 0.1)\n', (2821, 2844), False, 'import torch\n'), ((3208, 3246), 'torch.full_like', 'torch.full_like', (['conv_module.bias', '(0.1)'], {}), '(conv_module.bias, 0.1)\n', (3223, 3246), False, 'import torch\n'), ((3550, 3588), 'torch.full_like', 'torch.full_like', (['conv_module.bias', '(0.1)'], {}), '(conv_module.bias, 0.1)\n', (3565, 3588), False, 'import torch\n'), ((3653, 3682), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (3666, 3682), False, 'import pytest\n'), ((3692, 3743), 'mmcv.cnn.kaiming_init', 'kaiming_init', (['conv_module'], {'distribution': '"""student-t"""'}), "(conv_module, distribution='student-t')\n", (3704, 3743), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((4257, 4296), 'torch.full_like', 'torch.full_like', (['conv_module.bias', 'bias'], {}), '(conv_module.bias, bias)\n', (4272, 4296), False, 'import torch\n'), ((4385, 4403), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (4394, 4403), False, 'from torch import nn\n'), ((4405, 4414), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4412, 4414), False, 'from torch import nn\n'), ((4416, 4431), 'torch.nn.Linear', 'nn.Linear', (['(1)', '(2)'], {}), '(1, 2)\n', (4425, 4431), False, 'from torch 
import nn\n'), ((4544, 4582), 'torch.full', 'torch.full', (['model[0].weight.shape', '(1.0)'], {}), '(model[0].weight.shape, 1.0)\n', (4554, 4582), False, 'import torch\n'), ((4621, 4657), 'torch.full', 'torch.full', (['model[0].bias.shape', '(2.0)'], {}), '(model[0].bias.shape, 2.0)\n', (4631, 4657), False, 'import torch\n'), ((5005, 5043), 'torch.full', 'torch.full', (['model[0].weight.shape', '(1.0)'], {}), '(model[0].weight.shape, 1.0)\n', (5015, 5043), False, 'import torch\n'), ((5084, 5122), 'torch.full', 'torch.full', (['model[2].weight.shape', '(3.0)'], {}), '(model[2].weight.shape, 3.0)\n', (5094, 5122), False, 'import torch\n'), ((5161, 5197), 'torch.full', 'torch.full', (['model[0].bias.shape', '(2.0)'], {}), '(model[0].bias.shape, 2.0)\n', (5171, 5197), False, 'import torch\n'), ((5236, 5272), 'torch.full', 'torch.full', (['model[2].bias.shape', 'res'], {}), '(model[2].bias.shape, res)\n', (5246, 5272), False, 'import torch\n'), ((5343, 5361), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (5352, 5361), False, 'from torch import nn\n'), ((5363, 5372), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5370, 5372), False, 'from torch import nn\n'), ((5374, 5392), 'torch.nn.Conv1d', 'nn.Conv1d', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (5383, 5392), False, 'from torch import nn\n'), ((5677, 5701), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (5690, 5701), False, 'import pytest\n'), ((5718, 5747), 'mmcv.cnn.ConstantInit', 'ConstantInit', ([], {'val': '(1)', 'bias': '"""1"""'}), "(val=1, bias='1')\n", (5730, 5747), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((5783, 5807), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (5796, 5807), 
False, 'import pytest\n'), ((5824, 5858), 'mmcv.cnn.ConstantInit', 'ConstantInit', ([], {'val': '(1)', 'bias_prob': '"""1"""'}), "(val=1, bias_prob='1')\n", (5836, 5858), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((5896, 5920), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (5909, 5920), False, 'import pytest\n'), ((5937, 5965), 'mmcv.cnn.ConstantInit', 'ConstantInit', ([], {'val': '(1)', 'layer': '(1)'}), '(val=1, layer=1)\n', (5949, 5965), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((6050, 6068), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (6059, 6068), False, 'from torch import nn\n'), ((6070, 6079), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6077, 6079), False, 'from torch import nn\n'), ((6081, 6096), 'torch.nn.Linear', 'nn.Linear', (['(1)', '(2)'], {}), '(1, 2)\n', (6090, 6096), False, 'from torch import nn\n'), ((6196, 6231), 'torch.full_like', 'torch.full_like', (['model[2].bias', '(0.1)'], {}), '(model[2].bias, 0.1)\n', (6211, 6231), False, 'import torch\n'), ((6532, 6570), 'torch.full', 'torch.full', (['model[0].weight.shape', '(0.0)'], {}), '(model[0].weight.shape, 0.0)\n', (6542, 6570), False, 'import torch\n'), ((6611, 6649), 'torch.full', 'torch.full', (['model[2].weight.shape', '(0.0)'], {}), '(model[2].weight.shape, 0.0)\n', (6621, 6649), False, 'import torch\n'), ((6688, 6724), 'torch.full', 'torch.full', (['model[0].bias.shape', '(0.0)'], {}), '(model[0].bias.shape, 0.0)\n', (6698, 6724), False, 'import 
torch\n'), ((6763, 6799), 'torch.full', 'torch.full', (['model[2].bias.shape', '(0.0)'], {}), '(model[2].bias.shape, 0.0)\n', (6773, 6799), False, 'import torch\n'), ((7111, 7147), 'torch.full', 'torch.full', (['model[0].bias.shape', 'res'], {}), '(model[0].bias.shape, res)\n', (7121, 7147), False, 'import torch\n'), ((7187, 7223), 'torch.full', 'torch.full', (['model[2].bias.shape', 'res'], {}), '(model[2].bias.shape, res)\n', (7197, 7223), False, 'import torch\n'), ((7294, 7312), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (7303, 7312), False, 'from torch import nn\n'), ((7314, 7323), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7321, 7323), False, 'from torch import nn\n'), ((7325, 7343), 'torch.nn.Conv1d', 'nn.Conv1d', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (7334, 7343), False, 'from torch import nn\n'), ((7688, 7721), 'torch.all', 'torch.all', (['(model[0].weight == 4.0)'], {}), '(model[0].weight == 4.0)\n', (7697, 7721), False, 'import torch\n'), ((7736, 7769), 'torch.all', 'torch.all', (['(model[2].weight == 4.0)'], {}), '(model[2].weight == 4.0)\n', (7745, 7769), False, 'import torch\n'), ((7892, 7916), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (7905, 7916), False, 'import pytest\n'), ((7933, 7971), 'mmcv.cnn.XavierInit', 'XavierInit', ([], {'bias': '"""0.1"""', 'layer': '"""Conv2d"""'}), "(bias='0.1', layer='Conv2d')\n", (7943, 7971), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((8009, 8033), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (8022, 8033), False, 'import pytest\n'), ((8050, 8079), 'mmcv.cnn.XavierInit', 'XavierInit', ([], {'bias': '(0.1)', 'layer': '(1)'}), '(bias=0.1, layer=1)\n', (8060, 8079), False, 'from 
mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((8164, 8182), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (8173, 8182), False, 'from torch import nn\n'), ((8184, 8193), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (8191, 8193), False, 'from torch import nn\n'), ((8195, 8210), 'torch.nn.Linear', 'nn.Linear', (['(1)', '(2)'], {}), '(1, 2)\n', (8204, 8210), False, 'from torch import nn\n'), ((8345, 8364), 'torch.tensor', 'torch.tensor', (['(100.0)'], {}), '(100.0)\n', (8357, 8364), False, 'import torch\n'), ((8401, 8420), 'torch.tensor', 'torch.tensor', (['(100.0)'], {}), '(100.0)\n', (8413, 8420), False, 'import torch\n'), ((8455, 8474), 'torch.tensor', 'torch.tensor', (['(200.0)'], {}), '(200.0)\n', (8467, 8474), False, 'import torch\n'), ((8509, 8528), 'torch.tensor', 'torch.tensor', (['(200.0)'], {}), '(200.0)\n', (8521, 8528), False, 'import torch\n'), ((8713, 8732), 'torch.tensor', 'torch.tensor', (['(300.0)'], {}), '(300.0)\n', (8725, 8732), False, 'import torch\n'), ((8769, 8788), 'torch.tensor', 'torch.tensor', (['(300.0)'], {}), '(300.0)\n', (8781, 8788), False, 'import torch\n'), ((8823, 8840), 'torch.tensor', 'torch.tensor', (['res'], {}), '(res)\n', (8835, 8840), False, 'import torch\n'), ((8876, 8893), 'torch.tensor', 'torch.tensor', (['res'], {}), '(res)\n', (8888, 8893), False, 'import torch\n'), ((8964, 8982), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (8973, 8982), False, 'from torch import nn\n'), ((8984, 8993), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (8991, 8993), False, 'from torch import nn\n'), ((8995, 9013), 'torch.nn.Conv1d', 'nn.Conv1d', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (9004, 9013), False, 'from torch import nn\n'), ((9143, 9162), 
'torch.tensor', 'torch.tensor', (['(300.0)'], {}), '(300.0)\n', (9155, 9162), False, 'import torch\n'), ((9199, 9218), 'torch.tensor', 'torch.tensor', (['(300.0)'], {}), '(300.0)\n', (9211, 9218), False, 'import torch\n'), ((9399, 9417), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (9408, 9417), False, 'from torch import nn\n'), ((9419, 9428), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (9426, 9428), False, 'from torch import nn\n'), ((9430, 9445), 'torch.nn.Linear', 'nn.Linear', (['(1)', '(2)'], {}), '(1, 2)\n', (9439, 9445), False, 'from torch import nn\n'), ((9606, 9625), 'torch.tensor', 'torch.tensor', (['(100.0)'], {}), '(100.0)\n', (9618, 9625), False, 'import torch\n'), ((9662, 9681), 'torch.tensor', 'torch.tensor', (['(100.0)'], {}), '(100.0)\n', (9674, 9681), False, 'import torch\n'), ((9716, 9735), 'torch.tensor', 'torch.tensor', (['(200.0)'], {}), '(200.0)\n', (9728, 9735), False, 'import torch\n'), ((9770, 9789), 'torch.tensor', 'torch.tensor', (['(200.0)'], {}), '(200.0)\n', (9782, 9789), False, 'import torch\n'), ((10033, 10052), 'torch.tensor', 'torch.tensor', (['(300.0)'], {}), '(300.0)\n', (10045, 10052), False, 'import torch\n'), ((10089, 10108), 'torch.tensor', 'torch.tensor', (['(300.0)'], {}), '(300.0)\n', (10101, 10108), False, 'import torch\n'), ((10143, 10160), 'torch.tensor', 'torch.tensor', (['res'], {}), '(res)\n', (10155, 10160), False, 'import torch\n'), ((10196, 10213), 'torch.tensor', 'torch.tensor', (['res'], {}), '(res)\n', (10208, 10213), False, 'import torch\n'), ((10284, 10302), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (10293, 10302), False, 'from torch import nn\n'), ((10304, 10313), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (10311, 10313), False, 'from torch import nn\n'), ((10315, 10333), 'torch.nn.Conv1d', 'nn.Conv1d', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (10324, 10333), False, 'from torch import nn\n'), ((10491, 10510), 'torch.tensor', 
'torch.tensor', (['(300.0)'], {}), '(300.0)\n', (10503, 10510), False, 'import torch\n'), ((10547, 10566), 'torch.tensor', 'torch.tensor', (['(300.0)'], {}), '(300.0)\n', (10559, 10566), False, 'import torch\n'), ((10740, 10758), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (10749, 10758), False, 'from torch import nn\n'), ((10760, 10769), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (10767, 10769), False, 'from torch import nn\n'), ((10771, 10786), 'torch.nn.Linear', 'nn.Linear', (['(1)', '(2)'], {}), '(1, 2)\n', (10780, 10786), False, 'from torch import nn\n'), ((10913, 10951), 'torch.full', 'torch.full', (['model[0].weight.shape', '(1.0)'], {}), '(model[0].weight.shape, 1.0)\n', (10923, 10951), False, 'import torch\n'), ((10992, 11030), 'torch.full', 'torch.full', (['model[2].weight.shape', '(1.0)'], {}), '(model[2].weight.shape, 1.0)\n', (11002, 11030), False, 'import torch\n'), ((11069, 11105), 'torch.full', 'torch.full', (['model[0].bias.shape', '(2.0)'], {}), '(model[0].bias.shape, 2.0)\n', (11079, 11105), False, 'import torch\n'), ((11144, 11180), 'torch.full', 'torch.full', (['model[2].bias.shape', '(2.0)'], {}), '(model[2].bias.shape, 2.0)\n', (11154, 11180), False, 'import torch\n'), ((11312, 11352), 'torch.full', 'torch.full', (['model[0].weight.shape', '(100.0)'], {}), '(model[0].weight.shape, 100.0)\n', (11322, 11352), False, 'import torch\n'), ((11444, 11484), 'torch.full', 'torch.full', (['model[2].weight.shape', '(100.0)'], {}), '(model[2].weight.shape, 100.0)\n', (11454, 11484), False, 'import torch\n'), ((11574, 11611), 'torch.full', 'torch.full', (['model[0].bias.shape', '(10.0)'], {}), '(model[0].bias.shape, 10.0)\n', (11584, 11611), False, 'import torch\n'), ((11650, 11687), 'torch.full', 'torch.full', (['model[2].bias.shape', '(10.0)'], {}), '(model[2].bias.shape, 10.0)\n', (11660, 11687), False, 'import torch\n'), ((11757, 11775), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', 
(11766, 11775), False, 'from torch import nn\n'), ((11777, 11786), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (11784, 11786), False, 'from torch import nn\n'), ((11788, 11806), 'torch.nn.Conv1d', 'nn.Conv1d', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (11797, 11806), False, 'from torch import nn\n'), ((12195, 12213), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (12204, 12213), False, 'from torch import nn\n'), ((12215, 12224), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (12222, 12224), False, 'from torch import nn\n'), ((12226, 12241), 'torch.nn.Linear', 'nn.Linear', (['(1)', '(2)'], {}), '(1, 2)\n', (12235, 12241), False, 'from torch import nn\n'), ((12346, 12382), 'torch.full', 'torch.full', (['model[0].bias.shape', '(0.1)'], {}), '(model[0].bias.shape, 0.1)\n', (12356, 12382), False, 'import torch\n'), ((12679, 12717), 'torch.full', 'torch.full', (['model[0].weight.shape', '(0.0)'], {}), '(model[0].weight.shape, 0.0)\n', (12689, 12717), False, 'import torch\n'), ((12758, 12796), 'torch.full', 'torch.full', (['model[2].weight.shape', '(0.0)'], {}), '(model[2].weight.shape, 0.0)\n', (12768, 12796), False, 'import torch\n'), ((12835, 12871), 'torch.full', 'torch.full', (['model[0].bias.shape', '(0.0)'], {}), '(model[0].bias.shape, 0.0)\n', (12845, 12871), False, 'import torch\n'), ((12910, 12946), 'torch.full', 'torch.full', (['model[2].bias.shape', '(0.0)'], {}), '(model[2].bias.shape, 0.0)\n', (12920, 12946), False, 'import torch\n'), ((13222, 13259), 'torch.full', 'torch.full', (['model[0].bias.shape', '(10.0)'], {}), '(model[0].bias.shape, 10.0)\n', (13232, 13259), False, 'import torch\n'), ((13298, 13335), 'torch.full', 'torch.full', (['model[2].bias.shape', '(10.0)'], {}), '(model[2].bias.shape, 10.0)\n', (13308, 13335), False, 'import torch\n'), ((13405, 13423), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (13414, 13423), False, 'from torch import nn\n'), ((13425, 13434), 'torch.nn.ReLU', 
'nn.ReLU', ([], {}), '()\n', (13432, 13434), False, 'from torch import nn\n'), ((13436, 13454), 'torch.nn.Conv1d', 'nn.Conv1d', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (13445, 13454), False, 'from torch import nn\n'), ((13801, 13839), 'torch.full', 'torch.full', (['model[0].weight.shape', '(0.0)'], {}), '(model[0].weight.shape, 0.0)\n', (13811, 13839), False, 'import torch\n'), ((13880, 13918), 'torch.full', 'torch.full', (['model[2].weight.shape', '(0.0)'], {}), '(model[2].weight.shape, 0.0)\n', (13890, 13918), False, 'import torch\n'), ((13957, 13993), 'torch.full', 'torch.full', (['model[0].bias.shape', '(0.0)'], {}), '(model[0].bias.shape, 0.0)\n', (13967, 13993), False, 'import torch\n'), ((14032, 14068), 'torch.full', 'torch.full', (['model[2].bias.shape', '(0.0)'], {}), '(model[2].bias.shape, 0.0)\n', (14042, 14068), False, 'import torch\n'), ((14344, 14381), 'torch.full', 'torch.full', (['model[0].bias.shape', '(10.0)'], {}), '(model[0].bias.shape, 10.0)\n', (14354, 14381), False, 'import torch\n'), ((14420, 14457), 'torch.full', 'torch.full', (['model[2].bias.shape', '(10.0)'], {}), '(model[2].bias.shape, 10.0)\n', (14430, 14457), False, 'import torch\n'), ((14548, 14566), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (14557, 14566), False, 'from torch import nn\n'), ((14568, 14577), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (14575, 14577), False, 'from torch import nn\n'), ((14579, 14594), 'torch.nn.Linear', 'nn.Linear', (['(1)', '(2)'], {}), '(1, 2)\n', (14588, 14594), False, 'from torch import nn\n'), ((14704, 14740), 'torch.full', 'torch.full', (['model[0].bias.shape', '(0.1)'], {}), '(model[0].bias.shape, 0.1)\n', (14714, 14740), False, 'import torch\n'), ((14926, 14941), 'torch.nn.Linear', 'nn.Linear', (['(1)', '(2)'], {}), '(1, 2)\n', (14935, 14941), False, 'from torch import nn\n'), ((14964, 14982), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (14973, 14982), False, 'from torch 
import nn\n'), ((15007, 15025), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(2)', '(3)'], {}), '(3, 2, 3)\n', (15016, 15025), False, 'from torch import nn\n'), ((15411, 15431), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (15429, 15431), False, 'from tempfile import TemporaryDirectory\n'), ((16453, 16471), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (16462, 16471), False, 'from torch import nn\n'), ((16473, 16482), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (16480, 16482), False, 'from torch import nn\n'), ((16484, 16499), 'torch.nn.Linear', 'nn.Linear', (['(1)', '(2)'], {}), '(1, 2)\n', (16493, 16499), False, 'from torch import nn\n'), ((16700, 16738), 'torch.full', 'torch.full', (['model[0].weight.shape', '(1.0)'], {}), '(model[0].weight.shape, 1.0)\n', (16710, 16738), False, 'import torch\n'), ((16779, 16817), 'torch.full', 'torch.full', (['model[2].weight.shape', '(1.0)'], {}), '(model[2].weight.shape, 1.0)\n', (16789, 16817), False, 'import torch\n'), ((16856, 16892), 'torch.full', 'torch.full', (['model[0].bias.shape', '(2.0)'], {}), '(model[0].bias.shape, 2.0)\n', (16866, 16892), False, 'import torch\n'), ((16931, 16967), 'torch.full', 'torch.full', (['model[2].bias.shape', '(2.0)'], {}), '(model[2].bias.shape, 2.0)\n', (16941, 16967), False, 'import torch\n'), ((17319, 17357), 'torch.full', 'torch.full', (['model[0].weight.shape', '(1.0)'], {}), '(model[0].weight.shape, 1.0)\n', (17329, 17357), False, 'import torch\n'), ((17398, 17436), 'torch.full', 'torch.full', (['model[2].weight.shape', '(3.0)'], {}), '(model[2].weight.shape, 3.0)\n', (17408, 17436), False, 'import torch\n'), ((17475, 17511), 'torch.full', 'torch.full', (['model[0].bias.shape', '(2.0)'], {}), '(model[0].bias.shape, 2.0)\n', (17485, 17511), False, 'import torch\n'), ((17550, 17586), 'torch.full', 'torch.full', (['model[2].bias.shape', '(4.0)'], {}), '(model[2].bias.shape, 4.0)\n', (17560, 17586), False, 'import torch\n'), 
((18066, 18109), 'torch.full', 'torch.full', (['foonet.linear.weight.shape', '(1.0)'], {}), '(foonet.linear.weight.shape, 1.0)\n', (18076, 18109), False, 'import torch\n'), ((18176, 18217), 'torch.full', 'torch.full', (['foonet.linear.bias.shape', '(2.0)'], {}), '(foonet.linear.bias.shape, 2.0)\n', (18186, 18217), False, 'import torch\n'), ((18286, 18329), 'torch.full', 'torch.full', (['foonet.conv2d.weight.shape', '(1.0)'], {}), '(foonet.conv2d.weight.shape, 1.0)\n', (18296, 18329), False, 'import torch\n'), ((18396, 18437), 'torch.full', 'torch.full', (['foonet.conv2d.bias.shape', '(2.0)'], {}), '(foonet.conv2d.bias.shape, 2.0)\n', (18406, 18437), False, 'import torch\n'), ((18508, 18553), 'torch.full', 'torch.full', (['foonet.conv2d_2.weight.shape', '(3.0)'], {}), '(foonet.conv2d_2.weight.shape, 3.0)\n', (18518, 18553), False, 'import torch\n'), ((18622, 18665), 'torch.full', 'torch.full', (['foonet.conv2d_2.bias.shape', '(4.0)'], {}), '(foonet.conv2d_2.bias.shape, 4.0)\n', (18632, 18665), False, 'import torch\n'), ((19552, 19597), 'torch.full', 'torch.full', (['foonet.conv2d_2.weight.shape', '(5.0)'], {}), '(foonet.conv2d_2.weight.shape, 5.0)\n', (19562, 19597), False, 'import torch\n'), ((19666, 19709), 'torch.full', 'torch.full', (['foonet.conv2d_2.bias.shape', '(6.0)'], {}), '(foonet.conv2d_2.bias.shape, 6.0)\n', (19676, 19709), False, 'import torch\n'), ((20107, 20127), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (20125, 20127), False, 'from tempfile import TemporaryDirectory\n'), ((20191, 20219), 'mmcv.cnn.initialize', 'initialize', (['foonet', 'init_cfg'], {}), '(foonet, init_cfg)\n', (20201, 20219), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((21132, 21156), 'pytest.raises', 
'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (21145, 21156), False, 'import pytest\n'), ((21196, 21224), 'mmcv.cnn.initialize', 'initialize', (['foonet', 'init_cfg'], {}), '(foonet, init_cfg)\n', (21206, 21224), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((21266, 21290), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (21279, 21290), False, 'import pytest\n'), ((21462, 21490), 'mmcv.cnn.initialize', 'initialize', (['foonet', 'init_cfg'], {}), '(foonet, init_cfg)\n', (21472, 21490), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((21526, 21553), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (21539, 21553), False, 'import pytest\n'), ((21772, 21800), 'mmcv.cnn.initialize', 'initialize', (['foonet', 'init_cfg'], {}), '(foonet, init_cfg)\n', (21782, 21800), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((21841, 21868), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (21854, 21868), False, 'import pytest\n'), ((22188, 22216), 'mmcv.cnn.initialize', 'initialize', (['foonet', 'init_cfg'], {}), '(foonet, init_cfg)\n', (22198, 22216), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, 
UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((22273, 22298), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (22286, 22298), False, 'import pytest\n'), ((22460, 22488), 'mmcv.cnn.initialize', 'initialize', (['foonet', 'init_cfg'], {}), '(foonet, init_cfg)\n', (22470, 22488), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((22532, 22557), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (22545, 22557), False, 'import pytest\n'), ((22719, 22747), 'mmcv.cnn.initialize', 'initialize', (['foonet', 'init_cfg'], {}), '(foonet, init_cfg)\n', (22729, 22747), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((2364, 2415), 'scipy.stats.kstest', 'stats.kstest', (['z_samples', '"""truncnorm"""'], {'args': '(a0, b0)'}), "(z_samples, 'truncnorm', args=(a0, b0))\n", (2376, 2415), False, 'from scipy import stats\n'), ((4073, 4097), 'mmcv.cnn.bias_init_with_prob', 'bias_init_with_prob', (['(0.1)'], {}), '(0.1)\n', (4092, 4097), False, 'from mmcv.cnn import Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit, PretrainedInit, TruncNormalInit, UniformInit, XavierInit, bias_init_with_prob, caffe2_xavier_init, constant_init, initialize, kaiming_init, normal_init, trunc_normal_init, uniform_init, xavier_init\n'), ((4181, 4218), 'numpy.log', 'np.log', (['((1 - prior_prob) / prior_prob)'], {}), '((1 - prior_prob) / prior_prob)\n', (4187, 
4218), True, 'import numpy as np\n'), ((4730, 4768), 'torch.full', 'torch.full', (['model[2].weight.shape', '(1.0)'], {}), '(model[2].weight.shape, 1.0)\n', (4740, 4768), False, 'import torch\n'), ((4811, 4847), 'torch.full', 'torch.full', (['model[2].bias.shape', '(2.0)'], {}), '(model[2].bias.shape, 2.0)\n', (4821, 4847), False, 'import torch\n'), ((6271, 6306), 'torch.full_like', 'torch.full_like', (['model[0].bias', '(0.1)'], {}), '(model[0].bias, 0.1)\n', (6286, 6306), False, 'import torch\n'), ((6924, 6962), 'torch.full', 'torch.full', (['model[0].weight.shape', '(0.0)'], {}), '(model[0].weight.shape, 0.0)\n', (6934, 6962), False, 'import torch\n'), ((7034, 7072), 'torch.full', 'torch.full', (['model[2].weight.shape', '(0.0)'], {}), '(model[2].weight.shape, 0.0)\n', (7044, 7072), False, 'import torch\n'), ((12426, 12462), 'torch.full', 'torch.full', (['model[2].bias.shape', '(0.1)'], {}), '(model[2].bias.shape, 0.1)\n', (12436, 12462), False, 'import torch\n'), ((13035, 13073), 'torch.full', 'torch.full', (['model[0].weight.shape', '(0.0)'], {}), '(model[0].weight.shape, 0.0)\n', (13045, 13073), False, 'import torch\n'), ((13145, 13183), 'torch.full', 'torch.full', (['model[2].weight.shape', '(0.0)'], {}), '(model[2].weight.shape, 0.0)\n', (13155, 13183), False, 'import torch\n'), ((14157, 14195), 'torch.full', 'torch.full', (['model[0].weight.shape', '(0.0)'], {}), '(model[0].weight.shape, 0.0)\n', (14167, 14195), False, 'import torch\n'), ((14267, 14305), 'torch.full', 'torch.full', (['model[2].weight.shape', '(0.0)'], {}), '(model[2].weight.shape, 0.0)\n', (14277, 14305), False, 'import torch\n'), ((14784, 14820), 'torch.full', 'torch.full', (['model[2].bias.shape', '(0.1)'], {}), '(model[2].bias.shape, 0.1)\n', (14794, 14820), False, 'import torch\n'), ((15585, 15628), 'torch.full', 'torch.full', (['modelB.linear.weight.shape', '(1.0)'], {}), '(modelB.linear.weight.shape, 1.0)\n', (15595, 15628), False, 'import torch\n'), ((15703, 15744), 'torch.full', 
'torch.full', (['modelB.linear.bias.shape', '(2.0)'], {}), '(modelB.linear.bias.shape, 2.0)\n', (15713, 15744), False, 'import torch\n'), ((15821, 15864), 'torch.full', 'torch.full', (['modelB.conv2d.weight.shape', '(1.0)'], {}), '(modelB.conv2d.weight.shape, 1.0)\n', (15831, 15864), False, 'import torch\n'), ((15939, 15980), 'torch.full', 'torch.full', (['modelB.conv2d.bias.shape', '(2.0)'], {}), '(modelB.conv2d.bias.shape, 2.0)\n', (15949, 15980), False, 'import torch\n'), ((16059, 16104), 'torch.full', 'torch.full', (['modelB.conv2d_2.weight.shape', '(1.0)'], {}), '(modelB.conv2d_2.weight.shape, 1.0)\n', (16069, 16104), False, 'import torch\n'), ((16181, 16224), 'torch.full', 'torch.full', (['modelB.conv2d_2.bias.shape', '(2.0)'], {}), '(modelB.conv2d_2.bias.shape, 2.0)\n', (16191, 16224), False, 'import torch\n'), ((16290, 16326), 'torch.full', 'torch.full', (['modelC.weight.shape', '(1.0)'], {}), '(modelC.weight.shape, 1.0)\n', (16300, 16326), False, 'import torch\n'), ((16367, 16401), 'torch.full', 'torch.full', (['modelC.bias.shape', '(2.0)'], {}), '(modelC.bias.shape, 2.0)\n', (16377, 16401), False, 'import torch\n'), ((19086, 19129), 'torch.full', 'torch.full', (['foonet.linear.weight.shape', '(5.0)'], {}), '(foonet.linear.weight.shape, 5.0)\n', (19096, 19129), False, 'import torch\n'), ((19204, 19245), 'torch.full', 'torch.full', (['foonet.linear.bias.shape', '(6.0)'], {}), '(foonet.linear.bias.shape, 6.0)\n', (19214, 19245), False, 'import torch\n'), ((19322, 19365), 'torch.full', 'torch.full', (['foonet.conv2d.weight.shape', '(5.0)'], {}), '(foonet.conv2d.weight.shape, 5.0)\n', (19332, 19365), False, 'import torch\n'), ((19440, 19481), 'torch.full', 'torch.full', (['foonet.conv2d.bias.shape', '(6.0)'], {}), '(foonet.conv2d.bias.shape, 6.0)\n', (19450, 19481), False, 'import torch\n'), ((20296, 20339), 'torch.full', 'torch.full', (['foonet.linear.weight.shape', '(1.0)'], {}), '(foonet.linear.weight.shape, 1.0)\n', (20306, 20339), False, 'import 
torch\n'), ((20414, 20455), 'torch.full', 'torch.full', (['foonet.linear.bias.shape', '(2.0)'], {}), '(foonet.linear.bias.shape, 2.0)\n', (20424, 20455), False, 'import torch\n'), ((20532, 20575), 'torch.full', 'torch.full', (['foonet.conv2d.weight.shape', '(1.0)'], {}), '(foonet.conv2d.weight.shape, 1.0)\n', (20542, 20575), False, 'import torch\n'), ((20650, 20691), 'torch.full', 'torch.full', (['foonet.conv2d.bias.shape', '(2.0)'], {}), '(foonet.conv2d.bias.shape, 2.0)\n', (20660, 20691), False, 'import torch\n'), ((20770, 20815), 'torch.full', 'torch.full', (['foonet.conv2d_2.weight.shape', '(3.0)'], {}), '(foonet.conv2d_2.weight.shape, 3.0)\n', (20780, 20815), False, 'import torch\n'), ((20892, 20935), 'torch.full', 'torch.full', (['foonet.conv2d_2.bias.shape', '(4.0)'], {}), '(foonet.conv2d_2.bias.shape, 4.0)\n', (20902, 20935), False, 'import torch\n'), ((1989, 2004), 'random.random', 'random.random', ([], {}), '()\n', (2002, 2004), False, 'import random\n')] |
import numpy
import clarity.IO as io;
def writePoints(filename, points, **args):
"""Write point data to csv file
Arguments:
filename (str): file name
points (array): point data
Returns:
str: file name
"""
numpy.savetxt(filename, points, delimiter=',', newline='\n', fmt='%.5e')
return filename
def readPoints(filename, **args):
"""Read point data to csv file
Arguments:
filename (str): file name
**args: arguments for :func:`~clarity.IO.pointsToRange`
Returns:
str: file name
"""
points = numpy.loadtxt(filename, delimiter=',');
return io.pointsToRange(points, **args);
| [
"numpy.savetxt",
"numpy.loadtxt",
"clarity.IO.pointsToRange"
] | [((267, 339), 'numpy.savetxt', 'numpy.savetxt', (['filename', 'points'], {'delimiter': '""","""', 'newline': '"""\n"""', 'fmt': '"""%.5e"""'}), "(filename, points, delimiter=',', newline='\\n', fmt='%.5e')\n", (280, 339), False, 'import numpy\n'), ((616, 654), 'numpy.loadtxt', 'numpy.loadtxt', (['filename'], {'delimiter': '""","""'}), "(filename, delimiter=',')\n", (629, 654), False, 'import numpy\n'), ((667, 699), 'clarity.IO.pointsToRange', 'io.pointsToRange', (['points'], {}), '(points, **args)\n', (683, 699), True, 'import clarity.IO as io\n')] |
from collections import Counter
import json
import os
import time
import numpy as np
import pickle
from ray import tune
from ray.tune.durable_trainable import DurableTrainable
class ProgressCallback(tune.callback.Callback):
def __init__(self):
self.last_update = 0
self.update_interval = 60
def on_step_end(self, iteration, trials, **kwargs):
if time.time() - self.last_update > self.update_interval:
now = time.time()
result = {
"last_update": now,
"iteration": iteration,
"trial_states": dict(
Counter([trial.status for trial in trials])),
}
test_output_json = os.environ.get("TEST_OUTPUT_JSON",
"/tmp/release_test.json")
with open(test_output_json, "wt") as f:
json.dump(result, f)
self.last_update = now
class TestDurableTrainable(DurableTrainable):
def __init__(self, remote_checkpoint_dir, config, logger_creator=None):
self.setup_env()
super(TestDurableTrainable, self).__init__(
remote_checkpoint_dir,
config=config,
logger_creator=logger_creator)
def setup_env(self):
pass
def setup(self, config):
self._num_iters = int(config["num_iters"])
self._sleep_time = config["sleep_time"]
self._score = config["score"]
self._checkpoint_iters = config["checkpoint_iters"]
self._checkpoint_size_b = config["checkpoint_size_b"]
self._checkpoint_num_items = self._checkpoint_size_b // 8 # np.float64
self._iter = 0
def step(self):
if self._iter > 0:
time.sleep(self._sleep_time)
res = dict(score=self._iter + self._score)
if self._iter >= self._num_iters:
res["done"] = True
self._iter += 1
return res
def save_checkpoint(self, tmp_checkpoint_dir):
checkpoint_file = os.path.join(tmp_checkpoint_dir, "bogus.ckpt")
checkpoint_data = np.random.uniform(
0, 1, size=self._checkpoint_num_items)
with open(checkpoint_file, "wb") as fp:
pickle.dump(checkpoint_data, fp)
return checkpoint_file
def load_checkpoint(self, checkpoint):
pass
def function_trainable(config):
num_iters = int(config["num_iters"])
sleep_time = config["sleep_time"]
score = config["score"]
checkpoint_iters = config["checkpoint_iters"]
checkpoint_size_b = config["checkpoint_size_b"]
checkpoint_num_items = checkpoint_size_b // 8 # np.float64
for i in range(num_iters):
if checkpoint_iters >= 0 and checkpoint_size_b > 0 and \
i % checkpoint_iters == 0:
with tune.checkpoint_dir(step=i) as dir:
checkpoint_file = os.path.join(dir, "bogus.ckpt")
checkpoint_data = np.random.uniform(
0, 1, size=checkpoint_num_items)
with open(checkpoint_file, "wb") as fp:
pickle.dump(checkpoint_data, fp)
tune.report(score=i + score)
time.sleep(sleep_time)
def timed_tune_run(name: str,
num_samples: int,
results_per_second: int = 1,
trial_length_s: int = 1,
max_runtime: int = 300,
checkpoint_freq_s: int = -1,
checkpoint_size_b: int = 0,
**tune_kwargs):
durable = "sync_config" in tune_kwargs and \
tune_kwargs["sync_config"].upload_dir and \
tune_kwargs["sync_config"].upload_dir.startswith("s3://")
sleep_time = 1. / results_per_second
num_iters = int(trial_length_s / sleep_time)
checkpoint_iters = -1
if checkpoint_freq_s >= 0:
checkpoint_iters = int(checkpoint_freq_s / sleep_time)
config = {
"score": tune.uniform(0., 1.),
"num_iters": num_iters,
"sleep_time": sleep_time,
"checkpoint_iters": checkpoint_iters,
"checkpoint_size_b": checkpoint_size_b,
}
print(f"Starting benchmark with config: {config}")
run_kwargs = {"reuse_actors": True, "verbose": 2}
run_kwargs.update(tune_kwargs)
_train = function_trainable
aws_key_id = os.getenv("AWS_ACCESS_KEY_ID", "")
aws_secret = os.getenv("AWS_SECRET_ACCESS_KEY", "")
aws_session = os.getenv("AWS_SESSION_TOKEN", "")
if durable:
class AwsDurableTrainable(TestDurableTrainable):
AWS_ACCESS_KEY_ID = aws_key_id
AWS_SECRET_ACCESS_KEY = aws_secret
AWS_SESSION_TOKEN = aws_session
def setup_env(self):
if self.AWS_ACCESS_KEY_ID:
os.environ["AWS_ACCESS_KEY_ID"] = self.AWS_ACCESS_KEY_ID
if self.AWS_SECRET_ACCESS_KEY:
os.environ[
"AWS_SECRET_ACCESS_KEY"] = self.AWS_SECRET_ACCESS_KEY
if self.AWS_SESSION_TOKEN:
os.environ["AWS_SESSION_TOKEN"] = self.AWS_SESSION_TOKEN
if all(
os.getenv(k, "") for k in [
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
"AWS_SESSION_TOKEN",
]):
print("Worker: AWS secrets found in env.")
else:
print("Worker: No AWS secrets found in env!")
_train = AwsDurableTrainable
run_kwargs["checkpoint_freq"] = checkpoint_iters
start_time = time.monotonic()
analysis = tune.run(
_train,
config=config,
num_samples=num_samples,
raise_on_failed_trial=False,
**run_kwargs)
time_taken = time.monotonic() - start_time
result = {
"time_taken": time_taken,
"trial_states": dict(
Counter([trial.status for trial in analysis.trials])),
"last_update": time.time()
}
test_output_json = os.environ.get("TEST_OUTPUT_JSON",
"/tmp/tune_test.json")
with open(test_output_json, "wt") as f:
json.dump(result, f)
if time_taken > max_runtime:
print(f"The {name} test took {time_taken:.2f} seconds, but should not "
f"have exceeded {max_runtime:.2f} seconds. Test failed. \n\n"
f"--- FAILED: {name.upper()} ::: "
f"{time_taken:.2f} > {max_runtime:.2f} ---")
else:
print(f"The {name} test took {time_taken:.2f} seconds, which "
f"is below the budget of {max_runtime:.2f} seconds. "
f"Test successful. \n\n"
f"--- PASSED: {name.upper()} ::: "
f"{time_taken:.2f} <= {max_runtime:.2f} ---")
| [
"numpy.random.uniform",
"ray.tune.uniform",
"json.dump",
"pickle.dump",
"ray.tune.report",
"ray.tune.run",
"os.environ.get",
"time.sleep",
"time.time",
"time.monotonic",
"ray.tune.checkpoint_dir",
"collections.Counter",
"os.path.join",
"os.getenv"
] | [((4338, 4372), 'os.getenv', 'os.getenv', (['"""AWS_ACCESS_KEY_ID"""', '""""""'], {}), "('AWS_ACCESS_KEY_ID', '')\n", (4347, 4372), False, 'import os\n'), ((4390, 4428), 'os.getenv', 'os.getenv', (['"""AWS_SECRET_ACCESS_KEY"""', '""""""'], {}), "('AWS_SECRET_ACCESS_KEY', '')\n", (4399, 4428), False, 'import os\n'), ((4447, 4481), 'os.getenv', 'os.getenv', (['"""AWS_SESSION_TOKEN"""', '""""""'], {}), "('AWS_SESSION_TOKEN', '')\n", (4456, 4481), False, 'import os\n'), ((5642, 5658), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (5656, 5658), False, 'import time\n'), ((5674, 5777), 'ray.tune.run', 'tune.run', (['_train'], {'config': 'config', 'num_samples': 'num_samples', 'raise_on_failed_trial': '(False)'}), '(_train, config=config, num_samples=num_samples,\n raise_on_failed_trial=False, **run_kwargs)\n', (5682, 5777), False, 'from ray import tune\n'), ((6074, 6131), 'os.environ.get', 'os.environ.get', (['"""TEST_OUTPUT_JSON"""', '"""/tmp/tune_test.json"""'], {}), "('TEST_OUTPUT_JSON', '/tmp/tune_test.json')\n", (6088, 6131), False, 'import os\n'), ((2026, 2072), 'os.path.join', 'os.path.join', (['tmp_checkpoint_dir', '"""bogus.ckpt"""'], {}), "(tmp_checkpoint_dir, 'bogus.ckpt')\n", (2038, 2072), False, 'import os\n'), ((2099, 2155), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': 'self._checkpoint_num_items'}), '(0, 1, size=self._checkpoint_num_items)\n', (2116, 2155), True, 'import numpy as np\n'), ((3136, 3164), 'ray.tune.report', 'tune.report', ([], {'score': '(i + score)'}), '(score=i + score)\n', (3147, 3164), False, 'from ray import tune\n'), ((3173, 3195), 'time.sleep', 'time.sleep', (['sleep_time'], {}), '(sleep_time)\n', (3183, 3195), False, 'import time\n'), ((3953, 3975), 'ray.tune.uniform', 'tune.uniform', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (3965, 3975), False, 'from ray import tune\n'), ((5832, 5848), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (5846, 5848), False, 'import time\n'), ((6032, 6043), 
'time.time', 'time.time', ([], {}), '()\n', (6041, 6043), False, 'import time\n'), ((6222, 6242), 'json.dump', 'json.dump', (['result', 'f'], {}), '(result, f)\n', (6231, 6242), False, 'import json\n'), ((457, 468), 'time.time', 'time.time', ([], {}), '()\n', (466, 468), False, 'import time\n'), ((717, 777), 'os.environ.get', 'os.environ.get', (['"""TEST_OUTPUT_JSON"""', '"""/tmp/release_test.json"""'], {}), "('TEST_OUTPUT_JSON', '/tmp/release_test.json')\n", (731, 777), False, 'import os\n'), ((1749, 1777), 'time.sleep', 'time.sleep', (['self._sleep_time'], {}), '(self._sleep_time)\n', (1759, 1777), False, 'import time\n'), ((2229, 2261), 'pickle.dump', 'pickle.dump', (['checkpoint_data', 'fp'], {}), '(checkpoint_data, fp)\n', (2240, 2261), False, 'import pickle\n'), ((5954, 6006), 'collections.Counter', 'Counter', (['[trial.status for trial in analysis.trials]'], {}), '([trial.status for trial in analysis.trials])\n', (5961, 6006), False, 'from collections import Counter\n'), ((384, 395), 'time.time', 'time.time', ([], {}), '()\n', (393, 395), False, 'import time\n'), ((892, 912), 'json.dump', 'json.dump', (['result', 'f'], {}), '(result, f)\n', (901, 912), False, 'import json\n'), ((2810, 2837), 'ray.tune.checkpoint_dir', 'tune.checkpoint_dir', ([], {'step': 'i'}), '(step=i)\n', (2829, 2837), False, 'from ray import tune\n'), ((2880, 2911), 'os.path.join', 'os.path.join', (['dir', '"""bogus.ckpt"""'], {}), "(dir, 'bogus.ckpt')\n", (2892, 2911), False, 'import os\n'), ((2946, 2996), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': 'checkpoint_num_items'}), '(0, 1, size=checkpoint_num_items)\n', (2963, 2996), True, 'import numpy as np\n'), ((626, 669), 'collections.Counter', 'Counter', (['[trial.status for trial in trials]'], {}), '([trial.status for trial in trials])\n', (633, 669), False, 'from collections import Counter\n'), ((3094, 3126), 'pickle.dump', 'pickle.dump', (['checkpoint_data', 'fp'], {}), '(checkpoint_data, fp)\n', (3105, 
3126), False, 'import pickle\n'), ((5171, 5187), 'os.getenv', 'os.getenv', (['k', '""""""'], {}), "(k, '')\n", (5180, 5187), False, 'import os\n')] |
import numpy as np
from scipy import signal, sparse
import matplotlib.pyplot as plt
from matplotlib import animation, rc
from matplotlib.collections import LineCollection
from matplotlib.gridspec import GridSpec
from sklearn import preprocessing
from scipy.spatial import distance
#------------------------------------------------------------
# Target functions
def periodic(t, amp=3., freq=1/300):
"""Generates a periodic function which a sum of 4 sinusoids.
"""
return amp*np.sin(np.pi*freq*t) + (amp/2) * np.sin(2*np.pi*freq*t) + (amp/3) * np.sin(3*np.pi*freq*t) + (amp/4) * np.sin(4*np.pi*freq*t)
periodic = np.vectorize(periodic)
def triangle(t, freq=1/600, amp=3):
"""Generates a triangle-wave function.
"""
return amp*signal.sawtooth(2*np.pi*freq*t, 0.5)
triangle = np.vectorize(triangle)
def cos_fun(t, amp=3., freq=1/300):
"""Generates a cos function.
"""
return amp*np.cos(np.pi*freq*t)
cos_fun = np.vectorize(cos_fun)
def complicated_periodic(t, amp=1., freq=1/300, seed=1):
"""Generates a complicated periodic function which a sum of 10 sinusoids.
"""
np.random.seed(seed)
amps = np.random.randint(1, 5, size=(6,))
freqs = np.random.randint(1, 10, size=(6,))
return sum(am*amp*np.sin(fr*np.pi*freq*t) for am, fr in zip(amps, freqs))
complicated_periodic = np.vectorize(complicated_periodic)
def both(f, g):
"""Generates the function \\\(t ⟼ (f(t), g(t))\\\)
"""
return (lambda t: np.array([f(t), g(t)]) if isinstance(t, float) else np.array(list(zip(f(t), g(t)))))
per_tri = both(periodic, triangle)
def triple(f, g, h):
"""Generates the function \\\(t ⟼ (f(t), g(t), h(t))\\\)
"""
return (lambda t: np.array([f(t), g(t), h(t)]) if isinstance(t, float) else np.array(list(zip(f(t), g(t), h(t)))))
per_tri_cos = triple(periodic, triangle, cos_fun)
#------------------------------------------------------------
# General utility functions
def add_collection_curves(ax, ts, data, labels=None, color='indigo',
y_lim=None, starting_points=None, Δ=None):
"""
Adds a collection of curves a matplotlib ax.
"""
# the plot limits need to be set (no autoscale!)
ax.set_xlim(np.min(ts), np.max(ts))
min_data, max_data = data.min(), data.max()
if Δ is None:
Δ = 0.7*(max_data - min_data)
if y_lim is None:
ax.set_ylim(min_data, max_data+Δ*(len(data)-1))
else:
ax.set_ylim(y_lim[0], y_lim[1]+Δ*(len(data)-1))
curves = [np.column_stack((ts, curve)) for curve in data]
ticks_positions = Δ*np.arange(len(data))
offsets = np.column_stack((np.zeros(len(data)), ticks_positions))
ax.add_collection(LineCollection(curves, offsets=offsets, colors=color))
if labels is not None:
ax.set_yticks(ticks_positions+data[:,0])
ax.set_yticklabels(labels)
ax.tick_params(axis='y', colors=color)
def draw_axis_lines(ax, positions):
if 'right' in positions or 'left' in positions:
ax.yaxis.set_ticks_position('left') if 'left' in positions else ax.yaxis.set_ticks_position('right')
else:
ax.yaxis.set_ticks([])
ax.xaxis.set_ticks_position('bottom') if 'bottom' in positions else ax.xaxis.set_ticks([])
for pos in ax.spines.keys():
ax.spines[pos].set_position(('outward',7)) if pos in positions else ax.spines[pos].set_color('none')
#------------------------------------------------------------
# Dimension reduction functions
#------------------------------------------------------------
# PCA to compute the degrees of freedom
def PCA(data, nb_eig=8, return_matrix=True, return_eigenvalues=True):
"""
Principal Component Analysis (PCA) to compute the ``nb_eig`` leading principal components.
Parameters
----------
data : (n, k) array
Data points matrix (data points = row vectors in the matrix)
nb_eig : int, optional
Number of leading principal components returned
return_matrix : bool, optional
If True, returns the matrix of the data points projection on the eigenvectors
return_eigenvalues : bool, optional
Returns the eigenvalues.
Returns
-------
(k, nb_eig) array
Leading principal components/eigenvectors (columnwise).
Proj : (t_max, N_G) array
If return_matrix == True: Projection of the data points on the principal eigenvectors.
"""
# Covariance matrix
cov_matrix = np.cov(preprocessing.scale(data.T))
# Diagonalization of the covariance matrix
eig_val, eig_vec = np.linalg.eigh(cov_matrix)
if return_matrix or return_eigenvalues:
if return_matrix:
# Projection of the data points over the eigenvectors
Proj = data.dot(eig_vec[:,-nb_eig:])
if return_matrix and return_eigenvalues:
return eig_vec[:,-nb_eig:], Proj, eig_val
elif return_matrix:
return eig_vec[:,-nb_eig:], Proj
else:
return eig_vec[:,-nb_eig:], eig_val
return eig_vec[:,-nb_eig:] | [
"matplotlib.collections.LineCollection",
"numpy.random.seed",
"numpy.vectorize",
"sklearn.preprocessing.scale",
"scipy.signal.sawtooth",
"numpy.linalg.eigh",
"numpy.min",
"numpy.random.randint",
"numpy.max",
"numpy.sin",
"numpy.cos",
"numpy.column_stack"
] | [((628, 650), 'numpy.vectorize', 'np.vectorize', (['periodic'], {}), '(periodic)\n', (640, 650), True, 'import numpy as np\n'), ((802, 824), 'numpy.vectorize', 'np.vectorize', (['triangle'], {}), '(triangle)\n', (814, 824), True, 'import numpy as np\n'), ((949, 970), 'numpy.vectorize', 'np.vectorize', (['cos_fun'], {}), '(cos_fun)\n', (961, 970), True, 'import numpy as np\n'), ((1335, 1369), 'numpy.vectorize', 'np.vectorize', (['complicated_periodic'], {}), '(complicated_periodic)\n', (1347, 1369), True, 'import numpy as np\n'), ((1119, 1139), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1133, 1139), True, 'import numpy as np\n'), ((1151, 1185), 'numpy.random.randint', 'np.random.randint', (['(1)', '(5)'], {'size': '(6,)'}), '(1, 5, size=(6,))\n', (1168, 1185), True, 'import numpy as np\n'), ((1198, 1233), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {'size': '(6,)'}), '(1, 10, size=(6,))\n', (1215, 1233), True, 'import numpy as np\n'), ((5452, 5478), 'numpy.linalg.eigh', 'np.linalg.eigh', (['cov_matrix'], {}), '(cov_matrix)\n', (5466, 5478), True, 'import numpy as np\n'), ((754, 796), 'scipy.signal.sawtooth', 'signal.sawtooth', (['(2 * np.pi * freq * t)', '(0.5)'], {}), '(2 * np.pi * freq * t, 0.5)\n', (769, 796), False, 'from scipy import signal, sparse\n'), ((918, 942), 'numpy.cos', 'np.cos', (['(np.pi * freq * t)'], {}), '(np.pi * freq * t)\n', (924, 942), True, 'import numpy as np\n'), ((2220, 2230), 'numpy.min', 'np.min', (['ts'], {}), '(ts)\n', (2226, 2230), True, 'import numpy as np\n'), ((2232, 2242), 'numpy.max', 'np.max', (['ts'], {}), '(ts)\n', (2238, 2242), True, 'import numpy as np\n'), ((2525, 2553), 'numpy.column_stack', 'np.column_stack', (['(ts, curve)'], {}), '((ts, curve))\n', (2540, 2553), True, 'import numpy as np\n'), ((2720, 2773), 'matplotlib.collections.LineCollection', 'LineCollection', (['curves'], {'offsets': 'offsets', 'colors': 'color'}), '(curves, offsets=offsets, colors=color)\n', (2734, 
2773), False, 'from matplotlib.collections import LineCollection\n'), ((5352, 5379), 'sklearn.preprocessing.scale', 'preprocessing.scale', (['data.T'], {}), '(data.T)\n', (5371, 5379), False, 'from sklearn import preprocessing\n'), ((594, 622), 'numpy.sin', 'np.sin', (['(4 * np.pi * freq * t)'], {}), '(4 * np.pi * freq * t)\n', (600, 622), True, 'import numpy as np\n'), ((559, 587), 'numpy.sin', 'np.sin', (['(3 * np.pi * freq * t)'], {}), '(3 * np.pi * freq * t)\n', (565, 587), True, 'import numpy as np\n'), ((1256, 1285), 'numpy.sin', 'np.sin', (['(fr * np.pi * freq * t)'], {}), '(fr * np.pi * freq * t)\n', (1262, 1285), True, 'import numpy as np\n'), ((491, 515), 'numpy.sin', 'np.sin', (['(np.pi * freq * t)'], {}), '(np.pi * freq * t)\n', (497, 515), True, 'import numpy as np\n'), ((524, 552), 'numpy.sin', 'np.sin', (['(2 * np.pi * freq * t)'], {}), '(2 * np.pi * freq * t)\n', (530, 552), True, 'import numpy as np\n')] |
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GALINI IO logging."""
import logging
from pathlib import Path
import h5py
import numpy as np
from galini.io.message import (
text_message,
tensor_message,
solve_start_message,
solve_end_message,
update_variable_message,
add_bab_node_message,
prune_bab_node_message,
)
from galini.io.writer import MessageWriter
# Re-exported standard logging levels so client code can configure galini
# logging without importing the stdlib `logging` module directly.
CRITICAL = logging.CRITICAL
ERROR = logging.ERROR
WARNING = logging.WARNING
INFO = logging.INFO
DEBUG = logging.DEBUG
NOTSET = logging.NOTSET
class LogManager(object):
    """LogManager class for rich log messages.

    If `directory` is `None`, then rich logging will be disabled.
    This object keeps references to the Python logger and output
    files, but does not provide any method to write to them.
    Instantiate a child logger for each solver/run instead.

    Parameters
    ----------
    config : dict-like
        logging configuration
    """
    def __init__(self, config=None):
        self.config = config
        self.has_rich_logging = False
        self._loggers = {}
        self.apply_config(config)

    def apply_config(self, config):
        """Apply config to logger, (re)creating handlers and output files."""
        if config is None:
            config = {}
        level_name = config.get('level', 'INFO')
        if isinstance(level_name, str):
            # Map a symbolic level name ('DEBUG', ...) to its numeric value.
            level_name = logging.getLevelName(level_name)
        self._update_log_level(level_name)
        # delegate some logs to python logging module
        self._pylogger = logging.Logger(__name__)
        self._pylogger.setLevel(self.level)
        if config.get('stdout', False):
            stream_handler = logging.StreamHandler()
            self._pylogger.addHandler(stream_handler)
        if config.get('file') is not None:
            file_handler = logging.FileHandler(config['file'])
            self._pylogger.addHandler(file_handler)
        self._setup_message_log(config)

    def file_path(self, filename):
        """Full path for filename inside logger output dir.

        Parameters
        ----------
        filename : string
            file name

        Returns
        -------
        path or None
            Returns None if rich logging is disabled
        """
        if not self.has_rich_logging:
            return None
        path = self.directory / filename
        return str(path)

    def get_logger(self, name):
        """Return the (cached) named child Logger for this manager."""
        if name in self._loggers:
            return self._loggers[name]
        logger = Logger(name, manager=self, level=self.level)
        self._loggers[name] = logger
        return logger

    def _update_log_level(self, level):
        # Propagate the new threshold to all child loggers already created.
        self.level = level
        for logger in self._loggers.values():
            logger.level = level

    def _setup_message_log(self, config):
        """Open the binary message log and HDF5 data file, if configured."""
        directory = config.get('directory', None)
        if not directory:
            self.has_rich_logging = False
            return
        self.has_rich_logging = True
        self.directory = Path(directory)
        if not self.directory.exists():
            self.directory.mkdir(exist_ok=True)
        self.messages_file = open(self.directory / 'messages.bin', 'wb')
        self.writer = MessageWriter(self.messages_file)
        self.data_filename = 'data.hdf5'
        self.data_filepath = str(self.directory / self.data_filename)
        # Avoid exception about already open file when
        # re-applying config
        if getattr(self, 'data_file', None):
            self.data_file.close()
        self.data_file = h5py.File(self.data_filepath, 'w')

    def _log_message(self, message):
        # Messages are only persisted when a log directory was configured.
        if not self.has_rich_logging:
            return
        self.writer.write(message)

    def _log(self, name, run_id, lvl, msg, *args, **kwargs):
        """Format msg and emit it to both the Python logger and message log."""
        if lvl < self.level:
            return
        fmt_msg = msg.format(*args, **kwargs)
        # strip newline because it's added by pylogger
        # (use endswith so an empty message does not raise IndexError)
        if fmt_msg.endswith('\n'):
            pylog_fmt_msg = fmt_msg[:-1]
        else:
            pylog_fmt_msg = fmt_msg
        self._pylogger.log(
            lvl,
            '[{}][{}] {}'.format(name, run_id, pylog_fmt_msg),
        )
        message = text_message(name, run_id, fmt_msg, level=lvl)
        self._log_message(message)

    def _tensor(self, name, run_id, group, dataset, data):
        """Store `data` in the HDF5 file and emit a tensor message for it."""
        if not self.has_rich_logging:
            return
        # BUGFIX: the original reformatted `group` first and then tested the
        # (never-None) result for None, so the root-group branch was dead.
        # Test the parameter before formatting instead.
        if group is None:
            h5_group = self.data_file
        else:
            group = '{}/{}/{}'.format(name, run_id, group)
            if group not in self.data_file:
                self.data_file.create_group(group)
            h5_group = self.data_file[group]
        if dataset not in h5_group:
            # np.float was removed in NumPy 1.24; builtin float is the
            # equivalent 64-bit dtype.
            data = np.array(data, dtype=float)
            h5_group.create_dataset(dataset, data=data)
        message = tensor_message(
            name,
            run_id,
            filename=self.data_filepath,
            group=group,
            dataset=dataset,
            sizes=np.shape(data),
        )
        self._log_message(message)

    def __del__(self):
        # Best-effort cleanup; never raise during interpreter shutdown.
        if self.has_rich_logging:
            try:
                self.messages_file.close()
                self.data_file.close()
            except Exception:
                pass
class Logger(object):
    """Named logging facade that forwards everything to a LogManager.

    Each Logger carries its own threshold ``level``; records below the
    threshold are dropped before reaching the manager.
    """
    def __init__(self, name, manager, level=None):
        self.name = name
        self.manager = manager
        self.level = INFO if level is None else level

    def is_debug(self):
        """Return True when DEBUG records would be emitted."""
        return self.level <= DEBUG

    def log_message(self, message):
        """Log message to disk."""
        self.manager._log_message(message)

    def debug(self, run_id, msg, *args, **kwargs):
        """Log msg with DEBUG level."""
        return self.log(run_id, DEBUG, msg, *args, **kwargs)

    def info(self, run_id, msg, *args, **kwargs):
        """Log msg with INFO level."""
        return self.log(run_id, INFO, msg, *args, **kwargs)

    def warning(self, run_id, msg, *args, **kwargs):
        """Log msg with WARNING level."""
        return self.log(run_id, WARNING, msg, *args, **kwargs)

    def error(self, run_id, msg, *args, **kwargs):
        """Log msg with ERROR level."""
        return self.log(run_id, ERROR, msg, *args, **kwargs)

    def log(self, run_id, lvl, msg, *args, **kwargs):
        """Forward msg to the manager unless lvl is below this threshold.

        Arguments
        ---------
        run_id : str
            run_id used to collate logs
        lvl: int
            logging level
        msg: str
            format string
        args: Any
            arguments passed to msg.format
        kwargs: Any
            keyword arguments passed to msg.format
        """
        if lvl < self.level:
            return
        self.manager._log(self.name, run_id, lvl, msg, *args, **kwargs)

    def log_solve_start(self, run_id, solver):
        """Record that `solver` started working on run `run_id`."""
        message = solve_start_message(
            name=self.name,
            run_id=run_id,
            solver=solver,
        )
        self.log_message(message)

    def log_solve_end(self, run_id, solver):
        """Record that `solver` finished run `run_id`."""
        message = solve_end_message(
            name=self.name,
            run_id=run_id,
            solver=solver,
        )
        self.log_message(message)

    def log_add_bab_node(self, run_id, coordinate, lower_bound, upper_bound,
                         branching_variables=None):
        """Record a new branch-and-bound node together with its bounds."""
        message = add_bab_node_message(
            name=self.name,
            run_id=run_id,
            coordinate=coordinate,
            lower_bound=lower_bound,
            upper_bound=upper_bound,
            branching_variables=branching_variables,
        )
        self.log_message(message)

    def log_prune_bab_node(self, run_id, coordinate):
        """Record that the node at `coordinate` was pruned from the tree."""
        message = prune_bab_node_message(
            name=self.name,
            run_id=run_id,
            coordinate=coordinate,
        )
        self.log_message(message)

    def update_variable(self, run_id, var_name, iteration, value):
        """Record the value of `var_name` at the given iteration."""
        message = update_variable_message(
            name=self.name,
            run_id=run_id,
            var_name=var_name,
            iteration=iteration,
            value=value,
        )
        self.log_message(message)

    def tensor(self, run_id, group, dataset, data):
        """Log tensor data to data file, if configured.

        Arguments
        ---------
        group : string
            dataset group
        dataset : string
            dataset name
        data : array-like
            the data to log
        """
        return self.manager._tensor(self.name, run_id, group, dataset, data)
| [
"galini.io.writer.MessageWriter",
"h5py.File",
"logging.FileHandler",
"galini.io.message.add_bab_node_message",
"galini.io.message.update_variable_message",
"logging.StreamHandler",
"galini.io.message.text_message",
"galini.io.message.prune_bab_node_message",
"logging.Logger",
"numpy.shape",
"pa... | [((2039, 2063), 'logging.Logger', 'logging.Logger', (['__name__'], {}), '(__name__)\n', (2053, 2063), False, 'import logging\n'), ((3527, 3542), 'pathlib.Path', 'Path', (['directory'], {}), '(directory)\n', (3531, 3542), False, 'from pathlib import Path\n'), ((3726, 3759), 'galini.io.writer.MessageWriter', 'MessageWriter', (['self.messages_file'], {}), '(self.messages_file)\n', (3739, 3759), False, 'from galini.io.writer import MessageWriter\n'), ((4060, 4094), 'h5py.File', 'h5py.File', (['self.data_filepath', '"""w"""'], {}), "(self.data_filepath, 'w')\n", (4069, 4094), False, 'import h5py\n'), ((4696, 4742), 'galini.io.message.text_message', 'text_message', (['name', 'run_id', 'fmt_msg'], {'level': 'lvl'}), '(name, run_id, fmt_msg, level=lvl)\n', (4708, 4742), False, 'from galini.io.message import text_message, tensor_message, solve_start_message, solve_end_message, update_variable_message, add_bab_node_message, prune_bab_node_message\n'), ((1883, 1915), 'logging.getLevelName', 'logging.getLevelName', (['level_name'], {}), '(level_name)\n', (1903, 1915), False, 'import logging\n'), ((2177, 2200), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (2198, 2200), False, 'import logging\n'), ((2326, 2361), 'logging.FileHandler', 'logging.FileHandler', (["config['file']"], {}), "(config['file'])\n", (2345, 2361), False, 'import logging\n'), ((5223, 5253), 'numpy.array', 'np.array', (['data'], {'dtype': 'np.float'}), '(data, dtype=np.float)\n', (5231, 5253), True, 'import numpy as np\n'), ((7404, 7469), 'galini.io.message.solve_start_message', 'solve_start_message', ([], {'name': 'self.name', 'run_id': 'run_id', 'solver': 'solver'}), '(name=self.name, run_id=run_id, solver=solver)\n', (7423, 7469), False, 'from galini.io.message import text_message, tensor_message, solve_start_message, solve_end_message, update_variable_message, add_bab_node_message, prune_bab_node_message\n'), ((7589, 7652), 'galini.io.message.solve_end_message', 
'solve_end_message', ([], {'name': 'self.name', 'run_id': 'run_id', 'solver': 'solver'}), '(name=self.name, run_id=run_id, solver=solver)\n', (7606, 7652), False, 'from galini.io.message import text_message, tensor_message, solve_start_message, solve_end_message, update_variable_message, add_bab_node_message, prune_bab_node_message\n'), ((7856, 8030), 'galini.io.message.add_bab_node_message', 'add_bab_node_message', ([], {'name': 'self.name', 'run_id': 'run_id', 'coordinate': 'coordinate', 'lower_bound': 'lower_bound', 'upper_bound': 'upper_bound', 'branching_variables': 'branching_variables'}), '(name=self.name, run_id=run_id, coordinate=coordinate,\n lower_bound=lower_bound, upper_bound=upper_bound, branching_variables=\n branching_variables)\n', (7876, 8030), False, 'from galini.io.message import text_message, tensor_message, solve_start_message, solve_end_message, update_variable_message, add_bab_node_message, prune_bab_node_message\n'), ((8186, 8262), 'galini.io.message.prune_bab_node_message', 'prune_bab_node_message', ([], {'name': 'self.name', 'run_id': 'run_id', 'coordinate': 'coordinate'}), '(name=self.name, run_id=run_id, coordinate=coordinate)\n', (8208, 8262), False, 'from galini.io.message import text_message, tensor_message, solve_start_message, solve_end_message, update_variable_message, add_bab_node_message, prune_bab_node_message\n'), ((8404, 8515), 'galini.io.message.update_variable_message', 'update_variable_message', ([], {'name': 'self.name', 'run_id': 'run_id', 'var_name': 'var_name', 'iteration': 'iteration', 'value': 'value'}), '(name=self.name, run_id=run_id, var_name=var_name,\n iteration=iteration, value=value)\n', (8427, 8515), False, 'from galini.io.message import text_message, tensor_message, solve_start_message, solve_end_message, update_variable_message, add_bab_node_message, prune_bab_node_message\n'), ((5523, 5537), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (5531, 5537), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# # Finetuning FakeNewsAAAI
# FakeNewsAAAI is a Fake News dataset with 2 possible labels: `real` and `fake`
# In[1]:
import os, sys
import re
import argparse
import random
import numpy as np
import pandas as pd
import torch
from torch import optim
import torch.nn.functional as F
from tqdm import tqdm
from transformers import AutoModelForSequenceClassification, AutoConfig, AutoTokenizer
from utils.forward_fn import forward_sequence_classification
from utils.metrics import classification_metrics_fn
from utils.data_utils import FakeNewsDataset, FakeNewsDataLoader
from loss import *
###
# common functions
###
def set_seed(seed):
    """Seed every RNG used in training so runs are reproducible.

    Seeds Python's `random`, NumPy, and torch (CPU and CUDA). The CUDA
    calls are safe no-ops on CPU-only machines.

    Parameters
    ----------
    seed : int
        the seed value
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    # Also seed every GPU so multi-GPU runs are reproducible too.
    torch.cuda.manual_seed_all(seed)
def count_param(module, trainable=False):
    """Count the scalar parameters of a torch module.

    Parameters
    ----------
    module : torch.nn.Module
        module whose parameters are counted
    trainable : bool
        when True, only parameters with ``requires_grad`` are counted

    Returns
    -------
    int
        total number of parameter scalars
    """
    selected = (
        p for p in module.parameters() if p.requires_grad or not trainable
    )
    return sum(p.numel() for p in selected)
def get_lr(optimizer):
    """Return the learning rate of the optimizer's first param group.

    Returns None when the optimizer has no param groups, matching the
    original loop's fall-through behaviour.
    """
    groups = optimizer.param_groups
    return groups[0]['lr'] if groups else None
def metrics_to_string(metric_dict):
    """Render a metrics dict as space-separated ``name:value`` pairs.

    Values are formatted with four decimal places; the dict's insertion
    order is preserved.
    """
    return ' '.join(
        '{}:{:.4f}'.format(name, score) for name, score in metric_dict.items()
    )
# Train
def evaluate(args, model, valid_loader, result_path):
    """Run validation with the loss selected by ``args.loss`` and write a
    one-line summary (loss + classification metrics) to ``result_path``.

    Relies on the module-level ``i2w`` index-to-label mapping set up by
    the __main__ block.
    """
    if args.loss == 'SCE':
        criterion = SCELoss()
    elif args.loss == 'GCE':
        criterion = GCELoss()
    elif args.loss == 'CL':
        criterion = CLoss()

    # Switch to inference mode: no dropout, no gradient tracking.
    model.eval()
    torch.set_grad_enabled(False)

    running_loss = 0
    all_preds, all_golds = [], []
    progress = tqdm(valid_loader, leave=True, total=len(valid_loader))
    for step, batch in enumerate(progress):
        ce_loss, preds, golds, logits, labels = forward_sequence_classification(
            model, batch[1:-1], i2w=i2w, device='cuda')
        if args.loss == 'CE':
            step_loss = ce_loss
        else:
            # Robust losses work on the raw logits instead of CE.
            step_loss = criterion(logits.view(-1, 2), labels.view(-1))

        running_loss = running_loss + step_loss.item()

        all_preds += preds
        all_golds += golds
        metrics = classification_metrics_fn(all_preds, all_golds)
        progress.set_description("VALID LOSS:{:.4f} {}".format(
            running_loss / (step + 1), metrics_to_string(metrics)))

    metrics = classification_metrics_fn(all_preds, all_golds)
    summary = "VALID LOSS:{:.4f} {}".format(
        running_loss / (step + 1), metrics_to_string(metrics))
    print(summary)
    with open(result_path, 'w') as f:
        f.write(summary)
def test(args, model, valid_loader, result_path):
    """Run inference on an unlabeled loader and write an ``id,label`` CSV
    of the predictions to ``result_path``.

    Relies on the module-level ``i2w`` index-to-label mapping set up by
    the __main__ block.
    """
    # Switch to inference mode: no dropout, no gradient tracking.
    model.eval()
    torch.set_grad_enabled(False)

    predictions, row_ids = [], []
    progress = tqdm(valid_loader, leave=True, total=len(valid_loader))
    for batch in progress:
        row_ids += batch[0]
        preds, logits = forward_sequence_classification(
            model, batch[1:-1], i2w=i2w, is_test=True, device='cuda')
        predictions += preds

    with open(result_path, 'w') as f:
        print('writing')
        f.write('id,label')
        for row_id, label in zip(row_ids, predictions):
            f.write('\n' + str(row_id) + ',' + label)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name_or_path', type=str, default='roberta-large')
    parser.add_argument('--per_gpu_eval_batch_size', type=int, default=16)
    parser.add_argument('--loss', type=str, default='CE')
    # BUGFIX: argparse's `type=bool` parses any non-empty string -- even
    # "False" -- as True. A store_true flag keeps the default of False and
    # gives `--test` the intended on/off semantics.
    parser.add_argument('--test', action='store_true',
                        help='run inference on the unlabeled test set '
                             'instead of evaluating on the validation set')
    args = parser.parse_args()
    print(args)

    # Set random seed for reproducibility
    set_seed(26092020)

    # Evaluation loop: add more checkpoint paths to compare several models.
    for model_path in ['/home/jiziwei/FakeNews/math6380/save/roberta_finetune.CE.1e-6/roberta-large-CE3.pt']:
        # Load tokenizer and model config
        tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
        config = AutoConfig.from_pretrained(args.model_name_or_path)
        config.num_labels = FakeNewsDataset.NUM_LABELS

        # Alternative dataset paths kept for reference:
        # test_dataset_path = '/home/jiziwei/FakeNews/math6380/data/covid19_infodemic_english_data/processed_covid19_infodemic_english_data2.tsv'
        test_dataset_path = '/home/jiziwei/FakeNews/math6380/data/valid.tsv'
        # test_dataset_path = '/home/jiziwei/FakeNews/math6380/data/Constraint_English_Test.tsv'

        # Instantiate model and load the fine-tuned weights
        model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path, config=config)
        model.load_state_dict(torch.load(model_path))
        model = model.cuda()

        if args.test:
            test_dataset = FakeNewsDataset(tokenizer, dataset_path=test_dataset_path, lowercase=False, is_test=True)
            test_loader = FakeNewsDataLoader(dataset=test_dataset, max_seq_len=512, batch_size=args.per_gpu_eval_batch_size, num_workers=8, shuffle=False, is_test=True)
            w2i, i2w = FakeNewsDataset.LABEL2INDEX, FakeNewsDataset.INDEX2LABEL
            # Write outputs next to the checkpoint file.
            # NOTE(review): re.sub treats the file name as a regex pattern;
            # os.path.dirname(model_path) would be the safer way to get it.
            ans_path = re.sub(model_path.split('/')[-1], '', model_path)
            test(args, model, test_loader, ans_path+'answer3.txt')
        else:
            test_dataset = FakeNewsDataset(tokenizer, dataset_path=test_dataset_path, lowercase=False)
            test_loader = FakeNewsDataLoader(dataset=test_dataset, max_seq_len=512, batch_size=args.per_gpu_eval_batch_size, num_workers=8, shuffle=False)
            w2i, i2w = FakeNewsDataset.LABEL2INDEX, FakeNewsDataset.INDEX2LABEL
            ans_path = re.sub(model_path.split('/')[-1], '', model_path)
            evaluate(args, model, test_loader, ans_path+'result.txt')
| [
"transformers.AutoConfig.from_pretrained",
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.manual_seed",
"torch.load",
"torch.cuda.manual_seed",
"utils.data_utils.FakeNewsDataLoader",
"utils.metrics.classification_metrics_fn",
"transformers.AutoTokenizer.from_pretrained",
"random.seed",
"... | [((681, 698), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (692, 698), False, 'import random\n'), ((703, 723), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (717, 723), True, 'import numpy as np\n'), ((728, 751), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (745, 751), False, 'import torch\n'), ((756, 784), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (778, 784), False, 'import torch\n'), ((1603, 1632), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (1625, 1632), False, 'import torch\n'), ((2553, 2600), 'utils.metrics.classification_metrics_fn', 'classification_metrics_fn', (['list_hyp', 'list_label'], {}), '(list_hyp, list_label)\n', (2578, 2600), False, 'from utils.metrics import classification_metrics_fn\n'), ((2934, 2963), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (2956, 2963), False, 'import torch\n'), ((3646, 3671), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3669, 3671), False, 'import argparse\n'), ((1933, 2018), 'utils.forward_fn.forward_sequence_classification', 'forward_sequence_classification', (['model', 'batch_data[1:-1]'], {'i2w': 'i2w', 'device': '"""cuda"""'}), "(model, batch_data[1:-1], i2w=i2w, device='cuda'\n )\n", (1964, 2018), False, 'from utils.forward_fn import forward_sequence_classification\n'), ((2383, 2430), 'utils.metrics.classification_metrics_fn', 'classification_metrics_fn', (['list_hyp', 'list_label'], {}), '(list_hyp, list_label)\n', (2408, 2430), False, 'from utils.metrics import classification_metrics_fn\n'), ((3176, 3275), 'utils.forward_fn.forward_sequence_classification', 'forward_sequence_classification', (['model', 'batch_data[1:-1]'], {'i2w': 'i2w', 'is_test': '(True)', 'device': '"""cuda"""'}), "(model, batch_data[1:-1], i2w=i2w, is_test=\n True, device='cuda')\n", (3207, 3275), False, 'from utils.forward_fn import 
forward_sequence_classification\n'), ((4290, 4344), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['args.model_name_or_path'], {}), '(args.model_name_or_path)\n', (4319, 4344), False, 'from transformers import AutoModelForSequenceClassification, AutoConfig, AutoTokenizer\n'), ((4362, 4413), 'transformers.AutoConfig.from_pretrained', 'AutoConfig.from_pretrained', (['args.model_name_or_path'], {}), '(args.model_name_or_path)\n', (4388, 4413), False, 'from transformers import AutoModelForSequenceClassification, AutoConfig, AutoTokenizer\n'), ((4840, 4934), 'transformers.AutoModelForSequenceClassification.from_pretrained', 'AutoModelForSequenceClassification.from_pretrained', (['args.model_name_or_path'], {'config': 'config'}), '(args.model_name_or_path,\n config=config)\n', (4890, 4934), False, 'from transformers import AutoModelForSequenceClassification, AutoConfig, AutoTokenizer\n'), ((4961, 4983), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (4971, 4983), False, 'import torch\n'), ((5064, 5157), 'utils.data_utils.FakeNewsDataset', 'FakeNewsDataset', (['tokenizer'], {'dataset_path': 'test_dataset_path', 'lowercase': '(False)', 'is_test': '(True)'}), '(tokenizer, dataset_path=test_dataset_path, lowercase=False,\n is_test=True)\n', (5079, 5157), False, 'from utils.data_utils import FakeNewsDataset, FakeNewsDataLoader\n'), ((5180, 5327), 'utils.data_utils.FakeNewsDataLoader', 'FakeNewsDataLoader', ([], {'dataset': 'test_dataset', 'max_seq_len': '(512)', 'batch_size': 'args.per_gpu_eval_batch_size', 'num_workers': '(8)', 'shuffle': '(False)', 'is_test': '(True)'}), '(dataset=test_dataset, max_seq_len=512, batch_size=args.\n per_gpu_eval_batch_size, num_workers=8, shuffle=False, is_test=True)\n', (5198, 5327), False, 'from utils.data_utils import FakeNewsDataset, FakeNewsDataLoader\n'), ((5585, 5660), 'utils.data_utils.FakeNewsDataset', 'FakeNewsDataset', (['tokenizer'], {'dataset_path': 'test_dataset_path', 
'lowercase': '(False)'}), '(tokenizer, dataset_path=test_dataset_path, lowercase=False)\n', (5600, 5660), False, 'from utils.data_utils import FakeNewsDataset, FakeNewsDataLoader\n'), ((5687, 5820), 'utils.data_utils.FakeNewsDataLoader', 'FakeNewsDataLoader', ([], {'dataset': 'test_dataset', 'max_seq_len': '(512)', 'batch_size': 'args.per_gpu_eval_batch_size', 'num_workers': '(8)', 'shuffle': '(False)'}), '(dataset=test_dataset, max_seq_len=512, batch_size=args.\n per_gpu_eval_batch_size, num_workers=8, shuffle=False)\n', (5705, 5820), False, 'from utils.data_utils import FakeNewsDataset, FakeNewsDataLoader\n')] |
"""
A simple SVC model, for reference please see
https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html
I am only using five pickle parameters as feaures, in principle
more features can be used and one can also generate features on the
go using the data passed in to the Model.
"""
import numpy as np
from sklearn.ensemble import RandomForestClassifier
try:
import pickle
except ImportError:
import cPickle as pickle
from mlpipe import Model
class RFModel(Model):
    """Random-forest classifier over a fixed set of pickle-parameter
    features (see the module docstring)."""

    name = "RandomForest"

    def __init__(self, n_estimators=10, max_depth=5, random_state=0):
        self.model = RandomForestClassifier(n_estimators=n_estimators,
                                            max_depth=max_depth,
                                            random_state=random_state)
        self.name = '{}-{}'.format(self.name, n_estimators)
        self.features = ['corrLive', 'rmsLive', 'kurtLive', 'DELive',
                         'MFELive', 'skewLive', 'normLive', 'darkRatioLive',
                         'jumpLive', 'gainLive', 'feat1', 'feat2', 'feat3']

    def _feature_matrix(self, metadata):
        """Stack the configured metadata columns into one design matrix."""
        columns = [metadata[key] for key in self.features]
        return np.hstack(columns)

    def train(self, data, labels, metadata):
        """Fit the forest on the metadata-derived features."""
        self.model.fit(self._feature_matrix(metadata), labels)

    def validate(self, data, labels, metadata):
        """Predict labels and class probabilities for the given samples."""
        design = self._feature_matrix(metadata)
        prediction = self.model.predict(design)
        prediction_prob = self.model.predict_proba(design)
        return prediction, prediction_prob

    def save(self, filename):
        """Pickle the fitted sklearn model to *filename*."""
        with open(filename, 'wb') as f:
            pickle.dump(self.model, f, protocol=pickle.HIGHEST_PROTOCOL)
| [
"sklearn.ensemble.RandomForestClassifier",
"cPickle.dump",
"numpy.hstack"
] | [((614, 715), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': 'n_estimators', 'max_depth': 'max_depth', 'random_state': 'random_state'}), '(n_estimators=n_estimators, max_depth=max_depth,\n random_state=random_state)\n', (636, 715), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1153, 1204), 'numpy.hstack', 'np.hstack', (['[metadata[key] for key in self.features]'], {}), '([metadata[key] for key in self.features])\n', (1162, 1204), True, 'import numpy as np\n'), ((1314, 1365), 'numpy.hstack', 'np.hstack', (['[metadata[key] for key in self.features]'], {}), '([metadata[key] for key in self.features])\n', (1323, 1365), True, 'import numpy as np\n'), ((1607, 1667), 'cPickle.dump', 'pickle.dump', (['self.model', 'f'], {'protocol': 'pickle.HIGHEST_PROTOCOL'}), '(self.model, f, protocol=pickle.HIGHEST_PROTOCOL)\n', (1618, 1667), True, 'import cPickle as pickle\n')] |
# DISCLAIMER: THIS CODE IS A DIDACTIC EXAMPLE. IT CONTAINS NO ERROR
# HANDLING, NO SOPHISTICATION AND NO PERFORMANCE TUNING. EVERY USE OF AN
# EXTERNAL LIBRARY HERE COULD BE IMPLEMENTED BETTER.
# ===================================================================================
import matplotlib.pyplot as plt
import numpy as np
import csv
from osgeo import gdal,ogr,osr
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

# FILES TO USE
# ==================================================================================
workdir="/home/alfredo/Escritorio/desafiosAgTech2020/"
image_name = workdir+"S2_20200117_B020304081112.tif"
train_csv_name = workdir+"data_train_r.csv"
test_csv_name = workdir+"data_test_r.csv"

# OPEN THE RASTER IMAGE
# ==================================================================================
raster_ds=gdal.Open(image_name)
raster_gt=raster_ds.GetGeoTransform()
raster_dataPixel=np.zeros((raster_ds.RasterYSize,raster_ds.RasterXSize,raster_ds.RasterCount,))
for i in range(raster_ds.RasterCount):
    banddataraster = raster_ds.GetRasterBand(i+1)
    # BUGFIX: np.float was removed in NumPy 1.24; builtin float is the
    # equivalent 64-bit dtype.
    raster_dataPixel[:,:,i]= banddataraster.ReadAsArray(0,0, raster_ds.RasterXSize, raster_ds.RasterYSize).astype(float)

# OPEN THE TRAINING AND TEST POINTS
# ==================================================================================
train_SR = osr.SpatialReference()
train_SR.ImportFromEPSG(4326)
target_SR = osr.SpatialReference()
target_SR.ImportFromWkt(raster_ds.GetProjectionRef())
puntos_train=list()
puntos_test=list()
with open(train_csv_name, newline='') as csvfile:
    reader = csv.DictReader(csvfile)
    for row in reader:
        if (row['Campania']=='19/20'):
            # Reproject the lat/lon (EPSG:4326) point into the raster CRS
            # and convert it to pixel coordinates via the geotransform.
            point = ogr.Geometry(ogr.wkbPoint)
            point.AddPoint(float(row['Latitud']),float(row['Longitud']))
            coordTrans = osr.CoordinateTransformation(train_SR,target_SR)
            point.Transform(coordTrans)
            transf_x,transf_y=point.GetX(), point.GetY()
            px = int((transf_x - raster_gt[0]) / raster_gt[1]) #x pixel
            py = int((transf_y - raster_gt[3]) / raster_gt[5]) #y pixel
            puntos_train.append({'lat':row['Latitud'],'lon':row['Longitud'],'px':px,'py':py,'cultivo':row['Cultivo'],'camp':row['Campania']})
with open(test_csv_name, newline='') as csvfile:
    reader = csv.DictReader(csvfile)
    for row in reader:
        if (row['Campania']=='19/20'):
            # Same reprojection for the test points.
            point = ogr.Geometry(ogr.wkbPoint)
            point.AddPoint(float(row['Latitud']),float(row['Longitud']))
            coordTrans = osr.CoordinateTransformation(train_SR,target_SR)
            point.Transform(coordTrans)
            transf_x,transf_y=point.GetX(), point.GetY()
            px = int((transf_x - raster_gt[0]) / raster_gt[1]) #x pixel
            py = int((transf_y - raster_gt[3]) / raster_gt[5]) #y pixel
            puntos_test.append({'lat':row['Latitud'],'lon':row['Longitud'],'px':px,'py':py,'cultivo':row['Cultivo'],'camp':row['Campania']})

# GET THE PIXEL VALUES FOR THE TRAINING POINTS
# =================================================================================
valores_pixeles_entrenamiento = np.asarray([raster_dataPixel[d['py'],d['px'],:] for d in puntos_train])
clase_entrenamiento = [d['cultivo'] for d in puntos_train]

# TRAIN THE RANDOM FOREST AND CLASSIFY THE WHOLE IMAGE
# ==================================================================================
classifier = RandomForestClassifier(n_estimators=5)
classifier.fit(valores_pixeles_entrenamiento,clase_entrenamiento)
# (removed dead code: a slow per-point prediction over puntos_test whose
# result was immediately overwritten by the full-image prediction below)
img_reshape = np.reshape(raster_dataPixel,(raster_ds.RasterYSize*raster_ds.RasterXSize,raster_ds.RasterCount),order='C')
puntos_predichos = np.array(classifier.predict(img_reshape))

# RECLASSIFY: MERGE MAIZE (1st and 2nd crop), SOYBEAN (1st and 2nd), OTHERS
# ==================================================================================
img_clasif_num = np.zeros((raster_ds.RasterXSize*raster_ds.RasterYSize))
img_clasif_num[puntos_predichos=='M']=3
img_clasif_num[puntos_predichos=='S']=2
img_clasif_num[puntos_predichos=='m']=3
img_clasif_num[puntos_predichos=='s']=2
img_clasif_num[puntos_predichos=='B']=1
img_clasif_num = np.reshape(img_clasif_num,(raster_ds.RasterYSize,raster_ds.RasterXSize))
plt.imshow(np.array(img_clasif_num,dtype='int'),cmap='jet')
plt.colorbar()
plt.show()
| [
"sklearn.ensemble.RandomForestClassifier",
"matplotlib.pyplot.show",
"csv.DictReader",
"numpy.asarray",
"numpy.zeros",
"osgeo.osr.CoordinateTransformation",
"matplotlib.pyplot.colorbar",
"numpy.array",
"numpy.reshape",
"osgeo.ogr.Geometry",
"osgeo.gdal.Open",
"osgeo.osr.SpatialReference"
] | [((930, 951), 'osgeo.gdal.Open', 'gdal.Open', (['image_name'], {}), '(image_name)\n', (939, 951), False, 'from osgeo import gdal, ogr, osr\n'), ((1008, 1087), 'numpy.zeros', 'np.zeros', (['(raster_ds.RasterYSize, raster_ds.RasterXSize, raster_ds.RasterCount)'], {}), '((raster_ds.RasterYSize, raster_ds.RasterXSize, raster_ds.RasterCount))\n', (1016, 1087), True, 'import numpy as np\n'), ((1449, 1471), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (1469, 1471), False, 'from osgeo import gdal, ogr, osr\n'), ((1514, 1536), 'osgeo.osr.SpatialReference', 'osr.SpatialReference', ([], {}), '()\n', (1534, 1536), False, 'from osgeo import gdal, ogr, osr\n'), ((3274, 3347), 'numpy.asarray', 'np.asarray', (["[raster_dataPixel[d['py'], d['px'], :] for d in puntos_train]"], {}), "([raster_dataPixel[d['py'], d['px'], :] for d in puntos_train])\n", (3284, 3347), True, 'import numpy as np\n'), ((3515, 3553), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(5)'}), '(n_estimators=5)\n', (3537, 3553), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3737, 3852), 'numpy.reshape', 'np.reshape', (['raster_dataPixel', '(raster_ds.RasterYSize * raster_ds.RasterXSize, raster_ds.RasterCount)'], {'order': '"""C"""'}), "(raster_dataPixel, (raster_ds.RasterYSize * raster_ds.RasterXSize,\n raster_ds.RasterCount), order='C')\n", (3747, 3852), True, 'import numpy as np\n'), ((4093, 4148), 'numpy.zeros', 'np.zeros', (['(raster_ds.RasterXSize * raster_ds.RasterYSize)'], {}), '(raster_ds.RasterXSize * raster_ds.RasterYSize)\n', (4101, 4148), True, 'import numpy as np\n'), ((4367, 4441), 'numpy.reshape', 'np.reshape', (['img_clasif_num', '(raster_ds.RasterYSize, raster_ds.RasterXSize)'], {}), '(img_clasif_num, (raster_ds.RasterYSize, raster_ds.RasterXSize))\n', (4377, 4441), True, 'import numpy as np\n'), ((4502, 4516), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (4514, 4516), True, 'import 
matplotlib.pyplot as plt\n'), ((4517, 4527), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4525, 4527), True, 'import matplotlib.pyplot as plt\n'), ((1695, 1718), 'csv.DictReader', 'csv.DictReader', (['csvfile'], {}), '(csvfile)\n', (1709, 1718), False, 'import csv\n'), ((2455, 2478), 'csv.DictReader', 'csv.DictReader', (['csvfile'], {}), '(csvfile)\n', (2469, 2478), False, 'import csv\n'), ((4453, 4490), 'numpy.array', 'np.array', (['img_clasif_num'], {'dtype': '"""int"""'}), "(img_clasif_num, dtype='int')\n", (4461, 4490), True, 'import numpy as np\n'), ((1805, 1831), 'osgeo.ogr.Geometry', 'ogr.Geometry', (['ogr.wkbPoint'], {}), '(ogr.wkbPoint)\n', (1817, 1831), False, 'from osgeo import gdal, ogr, osr\n'), ((1938, 1987), 'osgeo.osr.CoordinateTransformation', 'osr.CoordinateTransformation', (['train_SR', 'target_SR'], {}), '(train_SR, target_SR)\n', (1966, 1987), False, 'from osgeo import gdal, ogr, osr\n'), ((2561, 2587), 'osgeo.ogr.Geometry', 'ogr.Geometry', (['ogr.wkbPoint'], {}), '(ogr.wkbPoint)\n', (2573, 2587), False, 'from osgeo import gdal, ogr, osr\n'), ((2686, 2735), 'osgeo.osr.CoordinateTransformation', 'osr.CoordinateTransformation', (['train_SR', 'target_SR'], {}), '(train_SR, target_SR)\n', (2714, 2735), False, 'from osgeo import gdal, ogr, osr\n')] |
import numpy as np
import param
from ..core import util
from ..core import Dimension, Dataset, Element2D
from ..core.data import GridInterface
class Chart(Dataset, Element2D):
    """
    The data held within Chart is a numpy array of shape (N, D),
    where N is the number of samples and D the number of dimensions.
    Chart Elements are sliceable along up to two key dimensions.

    The data may be supplied in one of three formats:

    1) As a numpy array of shape (N, D).
    2) As a list of length N containing tuples of length D.
    3) As a tuple of length D containing iterables of length N.
    """

    kdims = param.List(default=[Dimension('x')], bounds=(1,2), doc="""
        The key dimensions of the Chart, determining the number of
        indexable dimensions.""")

    group = param.String(default='Chart', constant=True)

    vdims = param.List(default=[Dimension('y')], bounds=(1,None), doc="""
        The value dimensions of the Chart, usually corresponding to a
        number of dependent variables.""")

    # Enables adding index if 1D array like data is supplied
    _auto_indexable_1d = True

    def __getitem__(self, index):
        """Slice the Chart along its key dimensions.

        Delegates the data slicing to the Dataset base class, then
        narrows ``extents`` so the result reflects the sliced range.
        """
        sliced = super(Chart, self).__getitem__(index)
        if not isinstance(sliced, Chart):
            # Scalar or otherwise reduced result: no extents to adjust.
            return sliced

        if not isinstance(index, tuple): index = (index,)
        # extents is laid out as all lower bounds followed by all uppers.
        ndims = len(self.extents)//2
        lower_bounds, upper_bounds = [None]*ndims, [None]*ndims
        for i, slc in enumerate(index[:ndims]):
            if isinstance(slc, slice):
                lbound = self.extents[i]
                ubound = self.extents[ndims:][i]
                # Open-ended slice sides inherit the original extent.
                lower_bounds[i] = lbound if slc.start is None else slc.start
                upper_bounds[i] = ubound if slc.stop is None else slc.stop
        sliced.extents = tuple(lower_bounds+upper_bounds)
        return sliced
class Scatter(Chart):
    """
    Scatter is an Element2D type which gets displayed as a number of
    disconnected points.
    """

    group = param.String(default='Scatter', constant=True)
class Curve(Chart):
    """
    Curve is a simple Chart Element providing 1D indexing along
    the x-axis, usually drawn as a continuous line.
    """

    group = param.String(default='Curve', constant=True)
class ErrorBars(Chart):
    """
    ErrorBars is a Chart Element type representing any number of
    errorbars situated in a 2D space. The errors must be supplied as
    an Nx3 or Nx4 array: x/y-positions followed by either a single
    symmetric error column or separate negative/positive error columns.
    """

    group = param.String(default='ErrorBars', constant=True, doc="""
        A string describing the quantity measured by the ErrorBars
        object.""")

    kdims = param.List(default=[Dimension('x')],
                       bounds=(1, 2), constant=True, doc="""
        The Dimensions corresponding to the x- and y-positions of
        the error bars.""")

    vdims = param.List(default=[Dimension('y'), Dimension('yerror')],
                       bounds=(1, 3), constant=True)

    def range(self, dim, data_range=True, dimension_range=True):
        """
        Return the lower/upper range of the requested dimension; for
        the y-dimension the error columns are folded into the bounds.
        """
        dim_index = self.get_dimension_index(dim)
        dim = self.get_dimension(dim)
        # Anything other than a populated y-dimension data range is
        # handled by the regular Chart implementation.
        if dim_index != 1 or not data_range or not len(self):
            return super(ErrorBars, self).range(dim, data_range)

        centers = self.dimension_values(1)
        lower_err = self.dimension_values(2)
        # Asymmetric errors supply a fourth column; otherwise the
        # single error column is mirrored on both sides.
        if len(self.dimensions()) > 3:
            upper_err = self.dimension_values(3)
        else:
            upper_err = lower_err
        lower = np.nanmin(centers - lower_err)
        upper = np.nanmax(centers + upper_err)
        if not dimension_range:
            return (lower, upper)
        return util.dimension_range(lower, upper, dim.range, dim.soft_range)
class Spread(ErrorBars):
    """
    Spread is a Chart Element type representing a spread of
    values as given by a mean and standard error or confidence
    intervals. Just like the ErrorBars Element type, mean and
    deviations from the mean should be supplied as either an
    Nx3 or Nx4 array representing the x-values, mean values
    and symmetric or asymmetric errors respectively. Internally
    the data is always expanded to an Nx4 array.
    """

    group = param.String(default='Spread', constant=True)
class Bars(Chart):
    """
    Bars is an Element type, representing a number of stacked and
    grouped bars, depending on the dimensionality of the key and value
    dimensions. Bars is useful for categorical data, which may be
    laid out via groups, categories and stacks.
    """

    group = param.String(default='Bars', constant=True)

    kdims = param.List(default=[Dimension('x')], bounds=(1,3))

    vdims = param.List(default=[Dimension('y')], bounds=(1, None))
class Histogram(Chart):
    """
    Histogram contains a number of bins, which are defined by the
    upper and lower bounds of their edges and the computed bin values.
    """

    # Histograms are stored on the gridded interface.
    datatype = param.List(default=['grid'])

    group = param.String(default='Histogram', constant=True)

    kdims = param.List(default=[Dimension('x')], bounds=(1,1), doc="""
        Dimensions on Element2Ds determine the number of indexable
        dimensions.""")

    vdims = param.List(default=[Dimension('Frequency')], bounds=(1,1))

    # Marks the key dimension as bin-edge based rather than point based.
    _binned = True

    def __init__(self, data, edges=None, **params):
        """
        Initialize the Histogram.

        data may be supplied as an (edges, values) tuple; passing the
        edges via the separate ``edges`` argument is deprecated.
        """
        if edges is not None:
            # Legacy calling convention: values in ``data``, edges passed
            # separately. Recombine into the tuple form with a warning.
            self.warning("Histogram edges should be supplied as a tuple "
                         "along with the values, passing the edges will "
                         "be deprecated in holoviews 2.0.")
            data = (edges, data)
        elif isinstance(data, tuple) and len(data) == 2 and len(data[0])+1 == len(data[1]):
            # A 2-tuple whose second array is one element longer is
            # interpreted as (values, edges) and flipped so the longer
            # edges array comes first.
            data = data[::-1]

        super(Histogram, self).__init__(data, **params)

    def __setstate__(self, state):
        """
        Ensures old-style Histogram types without an interface can be unpickled.

        Note: Deprecate as part of 2.0
        """
        if 'interface' not in state:
            self.interface = GridInterface
            x, y = state['_kdims_param_value'][0], state['_vdims_param_value'][0]
            # Old pickles stored the data as a bare pair; remap it into
            # the grid format keyed by dimension name (x gets data[1],
            # y gets data[0]).
            state['data'] = {x.name: state['data'][1], y.name: state['data'][0]}
        super(Dataset, self).__setstate__(state)

    @property
    def values(self):
        "Property to access the Histogram values provided for backward compatibility"
        return self.dimension_values(1)

    @property
    def edges(self):
        "Property to access the Histogram edges provided for backward compatibility"
        return self.interface.coords(self, self.kdims[0], edges=True)
class Points(Chart):
    """
    Allows sets of points to be positioned over a sheet coordinate
    system. Each point may optionally be associated with a chosen
    numeric value.

    The input data can be a Nx2 or Nx3 Numpy array where the first two
    columns corresponds to the X,Y coordinates in sheet coordinates,
    within the declared bounding region. For Nx3 arrays, the third
    column corresponds to the magnitude values of the points. Any
    additional columns will be ignored (use VectorFields instead).

    The input data may be also be passed as a tuple of elements that
    may be numpy arrays or values that can be cast to arrays. When
    such a tuple is supplied, the elements are joined column-wise into
    a single array, allowing the magnitudes to be easily supplied
    separately.

    Note that if magnitudes are to be rendered correctly by default,
    they should lie in the range [0,1].
    """

    kdims = param.List(default=[Dimension('x'), Dimension('y')],
                       bounds=(2, 2), constant=True, doc="""
        The label of the x- and y-dimension of the Points in form
        of a string or dimension object.""")

    group = param.String(default='Points', constant=True)

    vdims = param.List(default=[])

    _min_dims = 2      # Minimum number of columns
class VectorField(Points):
    """
    A VectorField is a collection of vectors, each with an associated
    position in sheet coordinates.

    Like Points, the input data can be an NxM Numpy array whose first
    two columns are the X,Y coordinates within the declared bounding
    region, or a tuple of array-like objects joined column-wise. The
    third column is the vector angle, which must be given in radians.
    A non-array collection may also be supplied, in which case each
    element is treated as one column of the NxM array.

    How any additional columns are visualized is decided by the
    plotting code; for instance, the fourth and fifth columns could
    correspond to arrow length and colour map value. These extra
    dimensions are assumed to be normalized between 0.0 and 1.0 for
    the default visualization to work well. The only restriction is
    that the final array is NxM with M>3, i.e. the vectors have a
    dimensionality of 2 or higher.
    """

    group = param.String(default='VectorField', constant=True)

    vdims = param.List(default=[Dimension('Angle', cyclic=True, range=(0,2*np.pi)),
                                Dimension('Magnitude')], bounds=(1, None))

    _null_value = np.array([[], [], [], []]).T # For when data is None
    _min_dims = 3                           # Minimum number of columns

    def __init__(self, data, kdims=None, vdims=None, **params):
        # A non-empty list consisting solely of numpy arrays is joined
        # column-wise, flattening any multi-dimensional component first.
        is_array_list = (isinstance(data, list) and data
                         and all(isinstance(d, np.ndarray) for d in data))
        if is_array_list:
            columns = [component.flat if component.ndim > 1 else component
                       for component in data]
            data = np.column_stack(columns)
        super(VectorField, self).__init__(data, kdims=kdims, vdims=vdims, **params)
class Spikes(Chart):
    """
    Spikes is a 1D or 2D Element, which represents a series of
    vertical or horizontal lines distributed along some dimension. If
    an additional dimension is supplied it will be used to specify the
    height of the lines. The Element may therefore be used to
    represent 1D distributions, spectrograms or spike trains in
    electrophysiology.
    """

    group = param.String(default='Spikes', constant=True)

    kdims = param.List(default=[Dimension('x')], bounds=(1, 1))

    vdims = param.List(default=[])

    # Spikes may be purely 1D, so no index dimension is auto-added.
    _auto_indexable_1d = False
class Area(Curve):
    """
    An Area Element represents the area under a Curve and is specified
    in the same format as a regular Curve: the key dimension is a
    column of x-values and the value dimension a column of y-values.
    Optionally a second value dimension may be supplied to shade the
    region between the curves.
    """

    group = param.String(default='Area', constant=True)

    @classmethod
    def stack(cls, areas):
        """
        Stacks an (Nd)Overlay of Area or Curve Elements by offsetting
        their baselines. To stack a HoloMap or DynamicMap use the map
        method.
        """
        if not len(areas):
            return areas
        first = areas.values()[0]
        # Running offset that each successive element is lifted onto.
        offset = np.zeros(len(first))
        stacked = areas.clone(shared_data=False)
        stack_vdims = [first.vdims[0], 'Baseline']
        for key, element in areas.items():
            xs = element.dimension_values(0)
            ys = element.dimension_values(1)
            stacked[key] = element.clone((xs, ys + offset, offset),
                                         vdims=stack_vdims, new_type=Area)
            offset = offset + ys
        return stacked
class BoxWhisker(Chart):
    """
    BoxWhisker represents data as distributions highlighting the
    median, mean and various percentiles. It may have a single value
    dimension and any number of key dimensions declaring the grouping
    of each box.
    """

    group = param.String(default='BoxWhisker', constant=True)

    kdims = param.List(default=[], bounds=(0,None))

    vdims = param.List(default=[Dimension('y')], bounds=(1,1))

    _auto_indexable_1d = False
| [
"param.List",
"numpy.nanmin",
"numpy.array",
"numpy.column_stack",
"param.String",
"numpy.nanmax"
] | [((801, 845), 'param.String', 'param.String', ([], {'default': '"""Chart"""', 'constant': '(True)'}), "(default='Chart', constant=True)\n", (813, 845), False, 'import param\n'), ((1999, 2045), 'param.String', 'param.String', ([], {'default': '"""Scatter"""', 'constant': '(True)'}), "(default='Scatter', constant=True)\n", (2011, 2045), False, 'import param\n'), ((2178, 2222), 'param.String', 'param.String', ([], {'default': '"""Curve"""', 'constant': '(True)'}), "(default='Curve', constant=True)\n", (2190, 2222), False, 'import param\n'), ((2538, 2691), 'param.String', 'param.String', ([], {'default': '"""ErrorBars"""', 'constant': '(True)', 'doc': '"""\n A string describing the quantity measured by the ErrorBars\n object."""'}), '(default=\'ErrorBars\', constant=True, doc=\n """\n A string describing the quantity measured by the ErrorBars\n object."""\n )\n', (2550, 2691), False, 'import param\n'), ((4239, 4284), 'param.String', 'param.String', ([], {'default': '"""Spread"""', 'constant': '(True)'}), "(default='Spread', constant=True)\n", (4251, 4284), False, 'import param\n'), ((4580, 4623), 'param.String', 'param.String', ([], {'default': '"""Bars"""', 'constant': '(True)'}), "(default='Bars', constant=True)\n", (4592, 4623), False, 'import param\n'), ((4952, 4980), 'param.List', 'param.List', ([], {'default': "['grid']"}), "(default=['grid'])\n", (4962, 4980), False, 'import param\n'), ((4994, 5042), 'param.String', 'param.String', ([], {'default': '"""Histogram"""', 'constant': '(True)'}), "(default='Histogram', constant=True)\n", (5006, 5042), False, 'import param\n'), ((7814, 7859), 'param.String', 'param.String', ([], {'default': '"""Points"""', 'constant': '(True)'}), "(default='Points', constant=True)\n", (7826, 7859), False, 'import param\n'), ((7873, 7895), 'param.List', 'param.List', ([], {'default': '[]'}), '(default=[])\n', (7883, 7895), False, 'import param\n'), ((9326, 9376), 'param.String', 'param.String', ([], {'default': '"""VectorField"""', 
'constant': '(True)'}), "(default='VectorField', constant=True)\n", (9338, 9376), False, 'import param\n'), ((10412, 10457), 'param.String', 'param.String', ([], {'default': '"""Spikes"""', 'constant': '(True)'}), "(default='Spikes', constant=True)\n", (10424, 10457), False, 'import param\n'), ((10536, 10558), 'param.List', 'param.List', ([], {'default': '[]'}), '(default=[])\n', (10546, 10558), False, 'import param\n'), ((10991, 11034), 'param.String', 'param.String', ([], {'default': '"""Area"""', 'constant': '(True)'}), "(default='Area', constant=True)\n", (11003, 11034), False, 'import param\n'), ((12039, 12088), 'param.String', 'param.String', ([], {'default': '"""BoxWhisker"""', 'constant': '(True)'}), "(default='BoxWhisker', constant=True)\n", (12051, 12088), False, 'import param\n'), ((12102, 12142), 'param.List', 'param.List', ([], {'default': '[]', 'bounds': '(0, None)'}), '(default=[], bounds=(0, None))\n', (12112, 12142), False, 'import param\n'), ((9556, 9582), 'numpy.array', 'np.array', (['[[], [], [], []]'], {}), '([[], [], [], []])\n', (9564, 9582), True, 'import numpy as np\n'), ((3477, 3504), 'numpy.nanmin', 'np.nanmin', (['(mean - neg_error)'], {}), '(mean - neg_error)\n', (3486, 3504), True, 'import numpy as np\n'), ((3523, 3550), 'numpy.nanmax', 'np.nanmax', (['(mean + pos_error)'], {}), '(mean + pos_error)\n', (3532, 3550), True, 'import numpy as np\n'), ((9861, 9923), 'numpy.column_stack', 'np.column_stack', (['[(d.flat if d.ndim > 1 else d) for d in data]'], {}), '([(d.flat if d.ndim > 1 else d) for d in data])\n', (9876, 9923), True, 'import numpy as np\n')] |
import random
import numpy as np
def generate(total, list_total, five_hits, five_hits_and_miss):
    """Simulate one round of six independent trials and update aggregates.

    Each trial succeeds with probability 9/10 (a roll of 1 on a d10 is
    a miss). The round's hit count is added to ``total`` and appended
    to ``list_total`` (mutated in place). When the first five trials
    all hit, ``five_hits`` is incremented, and ``five_hits_and_miss``
    is additionally incremented when the sixth trial missed.

    Returns the updated aggregates as the list
    ``[total, list_total, five_hits, five_hits_and_miss]``.
    """
    # One Bernoulli(9/10) outcome per trial. randint is still called
    # exactly six times in order, so the random-stream consumption is
    # identical to the original append-loop version.
    outcomes = [random.randint(1, 10) != 1 for _ in range(6)]

    num_hits = outcomes.count(True)
    total += num_hits
    list_total.append(num_hits)

    # Track rounds whose first five trials all hit, and how often the
    # sixth trial then missed.
    if outcomes[:5].count(True) == 5:
        five_hits += 1
        if not outcomes[5]:
            five_hits_and_miss += 1

    return [total, list_total, five_hits, five_hits_and_miss]
# Run 200,000 simulated rounds and report aggregate statistics.
total = 0
list_total = []
five_hits = 0
five_hits_and_miss = 0
for _ in range(200000):
    total, list_total, five_hits, five_hits_and_miss = generate(
        total, list_total, five_hits, five_hits_and_miss
    )

print(np.std(list_total))                 # spread of per-round hit counts
print(total / 1200000)                    # overall hit rate (200000 rounds x 6 trials)
print(five_hits / 200000)                 # share of rounds whose first five all hit
print(five_hits_and_miss / five_hits)     # P(sixth missed | first five hit)
| [
"numpy.std",
"random.randint"
] | [((790, 808), 'numpy.std', 'np.std', (['list_total'], {}), '(list_total)\n', (796, 808), True, 'import numpy as np\n'), ((143, 164), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (157, 164), False, 'import random\n')] |
from howtrader.app.cta_strategy import (
CtaTemplate,
StopOrder,
TickData,
BarData,
TradeData,
OrderData
)
from howtrader.app.cta_strategy.engine import CtaEngine
from howtrader.trader.event import EVENT_TIMER
from howtrader.event import Event
from howtrader.trader.object import Status, Direction, Interval, ContractData, AccountData
from howtrader.app.cta_strategy import BarGenerator
from typing import Optional, Union, Tuple
import numpy as np
import talib
from howtrader.trader.event import EVENT_CONTRACT, EVENT_ACCOUNT
class MyArrayManager(object):
    """
    Rolling time-series container for bar data.

    Keeps the most recent ``size`` bars in fixed-length numpy arrays
    and exposes talib-backed technical indicators computed over them.
    Each indicator returns the full series when ``array`` is True,
    otherwise only the latest value.
    """

    def __init__(self, size: int = 100):
        """Allocate zero-filled arrays holding the latest ``size`` bars."""
        self.count: int = 0        # number of bars received so far
        self.size: int = size      # capacity of the rolling window
        self.inited: bool = False  # True once ``size`` bars have arrived

        self.open_array: np.ndarray = np.zeros(size)
        self.high_array: np.ndarray = np.zeros(size)
        self.low_array: np.ndarray = np.zeros(size)
        self.close_array: np.ndarray = np.zeros(size)
        self.volume_array: np.ndarray = np.zeros(size)
        self.open_interest_array: np.ndarray = np.zeros(size)

    def update_bar(self, bar: BarData) -> None:
        """
        Push a new bar into the rolling arrays, dropping the oldest.
        """
        self.count += 1
        if not self.inited and self.count >= self.size:
            self.inited = True

        updates = (
            (self.open_array, bar.open_price),
            (self.high_array, bar.high_price),
            (self.low_array, bar.low_price),
            (self.close_array, bar.close_price),
            (self.volume_array, bar.volume),
            (self.open_interest_array, bar.open_interest),
        )
        for series, newest in updates:
            series[:-1] = series[1:]   # in-place left shift by one slot
            series[-1] = newest

    @property
    def open(self) -> np.ndarray:
        """Open price time series."""
        return self.open_array

    @property
    def high(self) -> np.ndarray:
        """High price time series."""
        return self.high_array

    @property
    def low(self) -> np.ndarray:
        """Low price time series."""
        return self.low_array

    @property
    def close(self) -> np.ndarray:
        """Close price time series."""
        return self.close_array

    @property
    def volume(self) -> np.ndarray:
        """Trading volume time series."""
        return self.volume_array

    @property
    def open_interest(self) -> np.ndarray:
        """Open interest time series."""
        return self.open_interest_array

    def sma(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
        """Simple moving average."""
        values = talib.SMA(self.close, n)
        return values if array else values[-1]

    def ema(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
        """Exponential moving average."""
        values = talib.EMA(self.close, n)
        return values if array else values[-1]

    def kama(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
        """Kaufman adaptive moving average."""
        values = talib.KAMA(self.close, n)
        return values if array else values[-1]

    def wma(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
        """Weighted moving average."""
        values = talib.WMA(self.close, n)
        return values if array else values[-1]

    def apo(
        self,
        fast_period: int,
        slow_period: int,
        matype: int = 0,
        array: bool = False
    ) -> Union[float, np.ndarray]:
        """Absolute price oscillator."""
        values = talib.APO(self.close, fast_period, slow_period, matype)
        return values if array else values[-1]

    def cmo(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
        """Chande momentum oscillator."""
        values = talib.CMO(self.close, n)
        return values if array else values[-1]

    def mom(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
        """Momentum."""
        values = talib.MOM(self.close, n)
        return values if array else values[-1]

    def ppo(
        self,
        fast_period: int,
        slow_period: int,
        matype: int = 0,
        array: bool = False
    ) -> Union[float, np.ndarray]:
        """Percentage price oscillator."""
        values = talib.PPO(self.close, fast_period, slow_period, matype)
        return values if array else values[-1]

    def roc(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
        """Rate of change."""
        values = talib.ROC(self.close, n)
        return values if array else values[-1]

    def rocr(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
        """Rate of change ratio."""
        values = talib.ROCR(self.close, n)
        return values if array else values[-1]

    def rocp(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
        """Rate of change percentage."""
        values = talib.ROCP(self.close, n)
        return values if array else values[-1]

    def rocr_100(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
        """Rate of change ratio, scaled to 100."""
        values = talib.ROCR100(self.close, n)
        return values if array else values[-1]

    def trix(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
        """TRIX (1-day ROC of a triple-smoothed EMA)."""
        values = talib.TRIX(self.close, n)
        return values if array else values[-1]

    def std(self, n: int, nbdev: int = 1, array: bool = False) -> Union[float, np.ndarray]:
        """Rolling standard deviation."""
        values = talib.STDDEV(self.close, n, nbdev)
        return values if array else values[-1]

    def obv(self, array: bool = False) -> Union[float, np.ndarray]:
        """On-balance volume."""
        values = talib.OBV(self.close, self.volume)
        return values if array else values[-1]

    def cci(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
        """Commodity Channel Index (CCI)."""
        values = talib.CCI(self.high, self.low, self.close, n)
        return values if array else values[-1]

    def atr(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
        """Average True Range (ATR)."""
        values = talib.ATR(self.high, self.low, self.close, n)
        return values if array else values[-1]

    def natr(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
        """Normalized Average True Range."""
        values = talib.NATR(self.high, self.low, self.close, n)
        return values if array else values[-1]

    def rsi(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
        """Relative Strength Index (RSI)."""
        values = talib.RSI(self.close, n)
        return values if array else values[-1]

    def macd(
        self,
        fast_period: int,
        slow_period: int,
        signal_period: int,
        array: bool = False
    ) -> Union[
        Tuple[np.ndarray, np.ndarray, np.ndarray],
        Tuple[float, float, float]
    ]:
        """MACD line, signal line and histogram."""
        macd_line, signal_line, histogram = talib.MACD(
            self.close, fast_period, slow_period, signal_period
        )
        if array:
            return macd_line, signal_line, histogram
        return macd_line[-1], signal_line[-1], histogram[-1]

    def adx(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
        """Average Directional Movement Index."""
        values = talib.ADX(self.high, self.low, self.close, n)
        return values if array else values[-1]

    def adxr(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
        """Average Directional Movement Index Rating."""
        values = talib.ADXR(self.high, self.low, self.close, n)
        return values if array else values[-1]

    def dx(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
        """Directional Movement Index."""
        values = talib.DX(self.high, self.low, self.close, n)
        return values if array else values[-1]

    def minus_di(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
        """Minus Directional Indicator."""
        values = talib.MINUS_DI(self.high, self.low, self.close, n)
        return values if array else values[-1]

    def plus_di(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
        """Plus Directional Indicator."""
        values = talib.PLUS_DI(self.high, self.low, self.close, n)
        return values if array else values[-1]

    def willr(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
        """Williams' %R."""
        values = talib.WILLR(self.high, self.low, self.close, n)
        return values if array else values[-1]

    def ultosc(
        self,
        time_period1: int = 7,
        time_period2: int = 14,
        time_period3: int = 28,
        array: bool = False
    ) -> Union[float, np.ndarray]:
        """Ultimate Oscillator."""
        values = talib.ULTOSC(self.high, self.low, self.close, time_period1, time_period2, time_period3)
        return values if array else values[-1]

    def trange(self, array: bool = False) -> Union[float, np.ndarray]:
        """True Range."""
        values = talib.TRANGE(self.high, self.low, self.close)
        return values if array else values[-1]

    def boll(
        self,
        n: int,
        dev: float,
        array: bool = False
    ) -> Union[
        Tuple[np.ndarray, np.ndarray],
        Tuple[float, float]
    ]:
        """Bollinger channel: SMA midline +/- dev * rolling stddev."""
        midline = self.sma(n, array)
        band = self.std(n, 1, array) * dev
        return midline + band, midline - band

    def keltner(
        self,
        n: int,
        dev: float,
        array: bool = False
    ) -> Union[
        Tuple[np.ndarray, np.ndarray],
        Tuple[float, float]
    ]:
        """Keltner channel: SMA midline +/- dev * ATR."""
        midline = self.sma(n, array)
        band = self.atr(n, array) * dev
        return midline + band, midline - band

    def donchian(
        self, n: int, array: bool = False
    ) -> Union[
        Tuple[np.ndarray, np.ndarray],
        Tuple[float, float]
    ]:
        """Donchian channel: rolling highest high and lowest low."""
        upper = talib.MAX(self.high, n)
        lower = talib.MIN(self.low, n)
        if array:
            return upper, lower
        return upper[-1], lower[-1]

    def aroon(
        self,
        n: int,
        array: bool = False
    ) -> Union[
        Tuple[np.ndarray, np.ndarray],
        Tuple[float, float]
    ]:
        """Aroon indicator, returned as (up, down)."""
        # talib returns (down, up); swap to the (up, down) order
        # callers of this method receive.
        down_vals, up_vals = talib.AROON(self.high, self.low, n)
        if array:
            return up_vals, down_vals
        return up_vals[-1], down_vals[-1]

    def aroonosc(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
        """Aroon Oscillator."""
        values = talib.AROONOSC(self.high, self.low, n)
        return values if array else values[-1]

    def minus_dm(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
        """Minus Directional Movement."""
        values = talib.MINUS_DM(self.high, self.low, n)
        return values if array else values[-1]

    def plus_dm(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
        """Plus Directional Movement."""
        values = talib.PLUS_DM(self.high, self.low, n)
        return values if array else values[-1]

    def mfi(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
        """Money Flow Index."""
        values = talib.MFI(self.high, self.low, self.close, self.volume, n)
        return values if array else values[-1]

    def ad(self, array: bool = False) -> Union[float, np.ndarray]:
        """Chaikin Accumulation/Distribution line."""
        values = talib.AD(self.high, self.low, self.close, self.volume)
        return values if array else values[-1]

    def adosc(
        self,
        fast_period: int,
        slow_period: int,
        array: bool = False
    ) -> Union[float, np.ndarray]:
        """Chaikin A/D Oscillator."""
        values = talib.ADOSC(self.high, self.low, self.close, self.volume, fast_period, slow_period)
        return values if array else values[-1]

    def bop(self, array: bool = False) -> Union[float, np.ndarray]:
        """Balance of Power."""
        values = talib.BOP(self.open, self.high, self.low, self.close)
        return values if array else values[-1]
class MartingleSpotStrategyV3(CtaTemplate):
    """
    Martingale spot strategy (v3).

    Binance referral link: https://www.binancezh.pro/cn/futures/ref/51bitquant
    Binance futures referral code: 51bitquant

    Strategy idea:

    1. Enter when a symbol pumps more than 2.6% in one hour or more
       than 4.6% in four hours, provided the bar's upper wick is short
       enough (guards against chasing a spike).
    2. Exit once profit exceeds 1% and the post-entry high has pulled
       back by 1% (both thresholds are parameters).
    3. If the position shows no profit and price keeps falling, add to
       the position martingale-style each time price drops 5% below the
       last entry price.
    """
    author = "51bitquant"

    # Core strategy parameters.
    initial_trading_value = 200  # value of the first entry order in USDT.
    trading_value_multiplier = 2  # multiplier applied to each averaging-down order.
    max_increase_pos_count = 5  # maximum number of averaging-down orders.
    hour_pump_pct = 0.026  # 1-hour pump threshold (2.6%).
    four_hour_pump_pct = 0.046  # 4-hour pump threshold (4.6%).
    high_close_change_pct = 0.03  # high/close - 1, rejects bars with long upper wicks.
    increase_pos_when_dump_pct = 0.05  # add to the position after a 5% drop.
    exit_profit_pct = 0.01  # take-profit threshold (1%).
    exit_pull_back_pct = 0.01  # exit when profit > 1% and the high pulled back > 1%.
    trading_fee = 0.00075  # exchange trading fee rate.

    # Runtime state variables.
    avg_price = 0.0  # average entry price of the current position.
    last_entry_price = 0.0  # price of the most recent entry fill.
    entry_highest_price = 0.0  # highest price seen since entering.
    current_pos = 0.0  # current position size (base-asset quantity).
    current_increase_pos_count = 0  # number of entries made so far.
    total_profit = 0  # accumulated realized profit.

    parameters = ["initial_trading_value", "trading_value_multiplier", "max_increase_pos_count",
                  "hour_pump_pct", "four_hour_pump_pct", "high_close_change_pct", "increase_pos_when_dump_pct",
                  "exit_profit_pct",
                  "exit_pull_back_pct", "trading_fee"]

    variables = ["avg_price", "last_entry_price", "entry_highest_price", "current_pos", "current_increase_pos_count",
                 "total_profit"]

    def __init__(self, cta_engine: CtaEngine, strategy_name, vt_symbol, setting):
        """Set up state and the 1-hour/4-hour bar generators."""
        super().__init__(cta_engine, strategy_name, vt_symbol, setting)
        # NOTE(review): annotations below originally used the invalid
        # form Optional[X, None]; typing.Optional takes a single type.
        self.last_filled_order: Optional[OrderData] = None
        self.tick: Optional[TickData] = None
        self.contract: Optional[ContractData] = None
        self.account: Optional[AccountData] = None
        self.bg_1hour = BarGenerator(self.on_bar, 1, on_window_bar=self.on_1hour_bar, interval=Interval.HOUR)  # 1hour
        self.bg_4hour = BarGenerator(self.on_bar, 4, on_window_bar=self.on_4hour_bar, interval=Interval.HOUR)  # 4hour

        # self.cta_engine.event_engine.register(EVENT_ACCOUNT + 'BINANCE.币名称', self.process_acccount_event)
        # self.cta_engine.event_engine.register(EVENT_ACCOUNT + "BINANCE.USDT", self.process_account_event)

        self.buy_orders = []  # ids of the currently pending buy orders.
        self.sell_orders = []  # ids of the currently pending sell orders.
        self.min_notional = 11  # minimum notional value per order (USDT).

    def on_init(self):
        """
        Callback when strategy is inited.
        """
        self.write_log("策略初始化")
        self.load_bar(3)  # preload 3 days of bar data.

    def on_start(self):
        """
        Callback when strategy is started.
        """
        self.write_log("策略启动")

    def on_stop(self):
        """
        Callback when strategy is stopped.
        """
        self.write_log("策略停止")

    # def process_account_event(self, event: Event):
    #     self.account: AccountData = event.data
    #     if self.account:
    #         print(
    #             f"self.account: available{self.account.available}, balance:{self.account.balance}, frozen: {self.account.frozen}")

    def on_tick(self, tick: TickData):
        """
        Callback of new tick data update.
        """
        # Only feed valid two-sided quotes into the bar generators.
        if tick.bid_price_1 > 0 and tick.ask_price_1 > 0:
            self.bg_1hour.update_tick(tick)
            self.bg_4hour.update_tick(tick)

    def on_bar(self, bar: BarData):
        """
        Callback of new bar data update.
        """
        # Track the highest price reached since the position was opened.
        if self.entry_highest_price > 0:
            self.entry_highest_price = max(bar.high_price, self.entry_highest_price)

        if self.current_pos * bar.close_price >= self.min_notional:
            # Currently holding a position worth at least the minimum notional.
            if len(self.sell_orders) <= 0 < self.avg_price:
                # Take-profit check: no pending sells and a valid average price.
                profit_percent = bar.close_price / self.avg_price - 1
                profit_pull_back_pct = self.entry_highest_price / bar.close_price - 1
                if profit_percent >= self.exit_profit_pct and profit_pull_back_pct >= self.exit_pull_back_pct:
                    # Profit target hit and price pulled back from the high:
                    # cancel other orders and close the whole position.
                    self.cancel_all()
                    orderids = self.sell(bar.close_price, abs(self.current_pos))
                    self.sell_orders.extend(orderids)

            if len(self.buy_orders) <= 0:
                # Averaging-down conditions: 1) holding a position worth more
                # than the minimum notional, 2) add-on count below the cap,
                # 3) price dropped by the configured percentage since entry.
                dump_down_pct = self.last_entry_price / bar.close_price - 1

                if self.current_increase_pos_count <= self.max_increase_pos_count and dump_down_pct >= self.increase_pos_when_dump_pct:
                    self.cancel_all()  # cancel remaining sell orders.

                    # ** is exponentiation: each add-on order is scaled by the
                    # multiplier raised to the number of entries so far.
                    increase_pos_value = self.initial_trading_value * self.trading_value_multiplier ** self.current_increase_pos_count
                    price = bar.close_price
                    vol = increase_pos_value / price
                    orderids = self.buy(price, vol)
                    self.buy_orders.extend(orderids)

        self.bg_1hour.update_bar(bar)
        self.bg_4hour.update_bar(bar)
        self.put_event()

    def on_1hour_bar(self, bar: BarData):
        """Entry check on completed 1-hour bars."""
        close_change_pct = bar.close_price / bar.open_price - 1  # close-to-open change of the bar.
        high_change_pct = bar.high_price / bar.close_price - 1  # upper-wick size.

        if self.current_pos * bar.close_price < self.min_notional:  # below min notional -> treated as flat.
            # Each order must be worth at least ~10 USDT; 11 is used for margin of safety.
            if close_change_pct >= self.hour_pump_pct and high_change_pct < self.high_close_change_pct and len(
                    self.buy_orders) == 0:
                # No position here: reset per-cycle state and place the
                # initial entry at the current close.
                self.cancel_all()
                self.current_increase_pos_count = 0
                self.avg_price = 0
                self.entry_highest_price = 0.0
                price = bar.close_price
                vol = self.initial_trading_value / price
                orderids = self.buy(price, vol)
                self.buy_orders.extend(orderids)  # remember the submitted order ids.

    def on_4hour_bar(self, bar: BarData):
        """Entry check on completed 4-hour bars."""
        close_change_pct = bar.close_price / bar.open_price - 1  # close-to-open change of the bar.
        high_change_pct = bar.high_price / bar.close_price - 1  # upper-wick size.

        if self.current_pos * bar.close_price < self.min_notional:
            # Each order must be worth at least ~10 USDT; 11 is used for margin of safety.
            if close_change_pct >= self.four_hour_pump_pct and high_change_pct < self.high_close_change_pct and len(
                    self.buy_orders) == 0:
                # No position here: reset per-cycle state and place the
                # initial entry at the current close.
                self.cancel_all()
                self.current_increase_pos_count = 0
                self.avg_price = 0
                self.entry_highest_price = 0.0
                price = bar.close_price
                vol = self.initial_trading_value / price
                orderids = self.buy(price, vol)
                self.buy_orders.extend(orderids)  # remember the submitted order ids.

    def on_order(self, order: OrderData):
        """
        Callback of new order data update.
        """
        if order.status == Status.ALLTRADED:
            if order.direction == Direction.LONG:
                # A buy order filled completely: count the entry and
                # remember its price.
                self.current_increase_pos_count += 1
                self.last_entry_price = order.price  # price of the last fill.
                self.entry_highest_price = order.price

        if not order.is_active():
            # Drop finished orders from the pending-order bookkeeping.
            if order.vt_orderid in self.sell_orders:
                self.sell_orders.remove(order.vt_orderid)
            elif order.vt_orderid in self.buy_orders:
                self.buy_orders.remove(order.vt_orderid)

        self.put_event()  # refresh the UI.

    def on_trade(self, trade: TradeData):
        """
        Callback of new trade data update.
        """
        if trade.direction == Direction.LONG:
            # Fold the fill into the volume-weighted average entry price.
            total = self.avg_price * self.current_pos + trade.price * trade.volume
            self.current_pos += trade.volume
            self.avg_price = total / self.current_pos
        elif trade.direction == Direction.SHORT:
            self.current_pos -= trade.volume

            # Accumulate realized profit; fees are charged on both sides.
            profit = (trade.price - self.avg_price) * trade.volume
            total_fee = trade.volume * trade.price * 2 * self.trading_fee
            self.total_profit += profit - total_fee

        self.put_event()

    def on_stop_order(self, stop_order: StopOrder):
        """
        Callback of stop order update.
        """
        pass
| [
"talib.ADXR",
"talib.TRANGE",
"talib.WILLR",
"talib.KAMA",
"talib.MOM",
"talib.AROONOSC",
"talib.MAX",
"talib.ADX",
"talib.PLUS_DI",
"howtrader.app.cta_strategy.BarGenerator",
"talib.ROCR",
"talib.APO",
"talib.AD",
"talib.MINUS_DM",
"talib.NATR",
"talib.DX",
"talib.BOP",
"talib.MIN... | [((894, 908), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (902, 908), True, 'import numpy as np\n'), ((947, 961), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (955, 961), True, 'import numpy as np\n'), ((999, 1013), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (1007, 1013), True, 'import numpy as np\n'), ((1053, 1067), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (1061, 1067), True, 'import numpy as np\n'), ((1108, 1122), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (1116, 1122), True, 'import numpy as np\n'), ((1170, 1184), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (1178, 1184), True, 'import numpy as np\n'), ((3046, 3070), 'talib.SMA', 'talib.SMA', (['self.close', 'n'], {}), '(self.close, n)\n', (3055, 3070), False, 'import talib\n'), ((3295, 3319), 'talib.EMA', 'talib.EMA', (['self.close', 'n'], {}), '(self.close, n)\n', (3304, 3319), False, 'import talib\n'), ((3523, 3548), 'talib.KAMA', 'talib.KAMA', (['self.close', 'n'], {}), '(self.close, n)\n', (3533, 3548), False, 'import talib\n'), ((3750, 3774), 'talib.WMA', 'talib.WMA', (['self.close', 'n'], {}), '(self.close, n)\n', (3759, 3774), False, 'import talib\n'), ((4087, 4142), 'talib.APO', 'talib.APO', (['self.close', 'fast_period', 'slow_period', 'matype'], {}), '(self.close, fast_period, slow_period, matype)\n', (4096, 4142), False, 'import talib\n'), ((4344, 4368), 'talib.CMO', 'talib.CMO', (['self.close', 'n'], {}), '(self.close, n)\n', (4353, 4368), False, 'import talib\n'), ((4570, 4594), 'talib.MOM', 'talib.MOM', (['self.close', 'n'], {}), '(self.close, n)\n', (4579, 4594), False, 'import talib\n'), ((4907, 4962), 'talib.PPO', 'talib.PPO', (['self.close', 'fast_period', 'slow_period', 'matype'], {}), '(self.close, fast_period, slow_period, matype)\n', (4916, 4962), False, 'import talib\n'), ((5164, 5188), 'talib.ROC', 'talib.ROC', (['self.close', 'n'], {}), '(self.close, n)\n', (5173, 5188), False, 'import talib\n'), 
((5392, 5417), 'talib.ROCR', 'talib.ROCR', (['self.close', 'n'], {}), '(self.close, n)\n', (5402, 5417), False, 'import talib\n'), ((5621, 5646), 'talib.ROCP', 'talib.ROCP', (['self.close', 'n'], {}), '(self.close, n)\n', (5631, 5646), False, 'import talib\n'), ((5857, 5885), 'talib.ROCR100', 'talib.ROCR100', (['self.close', 'n'], {}), '(self.close, n)\n', (5870, 5885), False, 'import talib\n'), ((6089, 6114), 'talib.TRIX', 'talib.TRIX', (['self.close', 'n'], {}), '(self.close, n)\n', (6099, 6114), False, 'import talib\n'), ((6347, 6381), 'talib.STDDEV', 'talib.STDDEV', (['self.close', 'n', 'nbdev'], {}), '(self.close, n, nbdev)\n', (6359, 6381), False, 'import talib\n'), ((6575, 6609), 'talib.OBV', 'talib.OBV', (['self.close', 'self.volume'], {}), '(self.close, self.volume)\n', (6584, 6609), False, 'import talib\n'), ((6837, 6882), 'talib.CCI', 'talib.CCI', (['self.high', 'self.low', 'self.close', 'n'], {}), '(self.high, self.low, self.close, n)\n', (6846, 6882), False, 'import talib\n'), ((7105, 7150), 'talib.ATR', 'talib.ATR', (['self.high', 'self.low', 'self.close', 'n'], {}), '(self.high, self.low, self.close, n)\n', (7114, 7150), False, 'import talib\n'), ((7354, 7400), 'talib.NATR', 'talib.NATR', (['self.high', 'self.low', 'self.close', 'n'], {}), '(self.high, self.low, self.close, n)\n', (7364, 7400), False, 'import talib\n'), ((7628, 7652), 'talib.RSI', 'talib.RSI', (['self.close', 'n'], {}), '(self.close, n)\n', (7637, 7652), False, 'import talib\n'), ((8056, 8119), 'talib.MACD', 'talib.MACD', (['self.close', 'fast_period', 'slow_period', 'signal_period'], {}), '(self.close, fast_period, slow_period, signal_period)\n', (8066, 8119), False, 'import talib\n'), ((8375, 8420), 'talib.ADX', 'talib.ADX', (['self.high', 'self.low', 'self.close', 'n'], {}), '(self.high, self.low, self.close, n)\n', (8384, 8420), False, 'import talib\n'), ((8624, 8670), 'talib.ADXR', 'talib.ADXR', (['self.high', 'self.low', 'self.close', 'n'], {}), '(self.high, self.low, 
self.close, n)\n', (8634, 8670), False, 'import talib\n'), ((8870, 8914), 'talib.DX', 'talib.DX', (['self.high', 'self.low', 'self.close', 'n'], {}), '(self.high, self.low, self.close, n)\n', (8878, 8914), False, 'import talib\n'), ((9126, 9176), 'talib.MINUS_DI', 'talib.MINUS_DI', (['self.high', 'self.low', 'self.close', 'n'], {}), '(self.high, self.low, self.close, n)\n', (9140, 9176), False, 'import talib\n'), ((9386, 9435), 'talib.PLUS_DI', 'talib.PLUS_DI', (['self.high', 'self.low', 'self.close', 'n'], {}), '(self.high, self.low, self.close, n)\n', (9399, 9435), False, 'import talib\n'), ((9641, 9688), 'talib.WILLR', 'talib.WILLR', (['self.high', 'self.low', 'self.close', 'n'], {}), '(self.high, self.low, self.close, n)\n', (9652, 9688), False, 'import talib\n'), ((10038, 10129), 'talib.ULTOSC', 'talib.ULTOSC', (['self.high', 'self.low', 'self.close', 'time_period1', 'time_period2', 'time_period3'], {}), '(self.high, self.low, self.close, time_period1, time_period2,\n time_period3)\n', (10050, 10129), False, 'import talib\n'), ((10325, 10370), 'talib.TRANGE', 'talib.TRANGE', (['self.high', 'self.low', 'self.close'], {}), '(self.high, self.low, self.close)\n', (10337, 10370), False, 'import talib\n'), ((11467, 11490), 'talib.MAX', 'talib.MAX', (['self.high', 'n'], {}), '(self.high, n)\n', (11476, 11490), False, 'import talib\n'), ((11506, 11528), 'talib.MIN', 'talib.MIN', (['self.low', 'n'], {}), '(self.low, n)\n', (11515, 11528), False, 'import talib\n'), ((11864, 11899), 'talib.AROON', 'talib.AROON', (['self.high', 'self.low', 'n'], {}), '(self.high, self.low, n)\n', (11875, 11899), False, 'import talib\n'), ((12152, 12190), 'talib.AROONOSC', 'talib.AROONOSC', (['self.high', 'self.low', 'n'], {}), '(self.high, self.low, n)\n', (12166, 12190), False, 'import talib\n'), ((12403, 12441), 'talib.MINUS_DM', 'talib.MINUS_DM', (['self.high', 'self.low', 'n'], {}), '(self.high, self.low, n)\n', (12417, 12441), False, 'import talib\n'), ((12652, 12689), 
'talib.PLUS_DM', 'talib.PLUS_DM', (['self.high', 'self.low', 'n'], {}), '(self.high, self.low, n)\n', (12665, 12689), False, 'import talib\n'), ((12905, 12963), 'talib.MFI', 'talib.MFI', (['self.high', 'self.low', 'self.close', 'self.volume', 'n'], {}), '(self.high, self.low, self.close, self.volume, n)\n', (12914, 12963), False, 'import talib\n'), ((13155, 13209), 'talib.AD', 'talib.AD', (['self.high', 'self.low', 'self.close', 'self.volume'], {}), '(self.high, self.low, self.close, self.volume)\n', (13163, 13209), False, 'import talib\n'), ((13497, 13584), 'talib.ADOSC', 'talib.ADOSC', (['self.high', 'self.low', 'self.close', 'self.volume', 'fast_period', 'slow_period'], {}), '(self.high, self.low, self.close, self.volume, fast_period,\n slow_period)\n', (13508, 13584), False, 'import talib\n'), ((13774, 13827), 'talib.BOP', 'talib.BOP', (['self.open', 'self.high', 'self.low', 'self.close'], {}), '(self.open, self.high, self.low, self.close)\n', (13783, 13827), False, 'import talib\n'), ((15838, 15928), 'howtrader.app.cta_strategy.BarGenerator', 'BarGenerator', (['self.on_bar', '(1)'], {'on_window_bar': 'self.on_1hour_bar', 'interval': 'Interval.HOUR'}), '(self.on_bar, 1, on_window_bar=self.on_1hour_bar, interval=\n Interval.HOUR)\n', (15850, 15928), False, 'from howtrader.app.cta_strategy import BarGenerator\n'), ((15957, 16047), 'howtrader.app.cta_strategy.BarGenerator', 'BarGenerator', (['self.on_bar', '(4)'], {'on_window_bar': 'self.on_4hour_bar', 'interval': 'Interval.HOUR'}), '(self.on_bar, 4, on_window_bar=self.on_4hour_bar, interval=\n Interval.HOUR)\n', (15969, 16047), False, 'from howtrader.app.cta_strategy import BarGenerator\n')] |
## Generates prediction of the steering angles for a given dashboard image
## Calls the imitation learning model trained
# and obtained from the training code and imitates the expert's training data
## Access the test images from the testset folder to generate
# predictions on them
import os
import cv2
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy
from torch.utils import data
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import MultiStepLR
from scipy import signal
import glob
def toDevice(datas, device):
    """Cast a (images, angles) batch pair to float and move it to *device*."""
    images, angles = datas
    images = images.float().to(device)
    angles = angles.float().to(device)
    return images, angles
def augment(img, angle):
    """Load a frame from disk and crop out the sky region.

    Returns the cropped image together with the (unchanged) steering angle.
    """
    frame = cv2.imread(img)
    # Drop the top 60 pixel rows (sky) — the steering cues are in the road area.
    frame = frame[60::, ::]
    return frame, angle
def change_bright(img):
    """Randomly darken an RGB image by scaling its HSV value channel.

    The image is converted to HSV, the V (brightness) channel is multiplied
    by a random factor drawn from [0.5, 1.0), and the result is converted
    back to RGB.

    Parameters
    ----------
    img : ndarray
        RGB image (H x W x 3, uint8).

    Returns
    -------
    ndarray
        Brightness-adjusted RGB image of the same shape.
    """
    # convert rgb to hsv
    hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    rand = np.random.uniform(0.5, 1.0)
    # BUG FIX: the original wrote `rand * hsv[:, :2]`, which selects the first
    # two *columns* of the image (shape mismatch -> ValueError at runtime)
    # instead of the V channel `hsv[:, :, 2]`.
    hsv[:, :, 2] = rand * hsv[:, :, 2]
    # convert back hsv to rgb (`cv2.cv2.cvtColor` in the original was a
    # harmless but confusing double-module access)
    new_img = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
    return new_img
def load_data(img_paths, steers, test_size):
    """Pair image paths with steering angles and build the validation split.

    Note: no actual train/validation split is performed — every sample ends
    up in the validation list and the training set is a "Null" placeholder
    (``test_size`` is accepted for interface compatibility but unused).
    """
    frame = pd.DataFrame({'center': img_paths, 'steering': steers})
    validation_rows = frame.values.tolist()
    return "Null", validation_rows
class TripletDataset(data.Dataset):
    """Standard PyTorch dataset over (image path, steering angle) rows."""

    def __init__(self, dataroot, samples, transform=None):
        self.samples = samples
        self.dataroot = dataroot
        self.transform = transform

    def __getitem__(self, index):
        """Return one preprocessed (image, steering angle) pair."""
        row = self.samples[index]
        angle = float(row[1])
        # Load and crop the frame via the shared augmentation helper.
        image, angle = augment(row[0], angle)
        return (image, angle)

    def __len__(self):
        """Number of samples in this split."""
        return len(self.samples)
def data_loader(dataroot, trainset, valset, batch_size, shuffle, num_workers):
    """Build the DataLoader pair for the training and validation splits.

    Args:
        dataroot: root directory handed through to the dataset objects
        trainset: training samples
        valset: validation samples
        batch_size: samples per batch
        shuffle: whether the loaders reshuffle every epoch
        num_workers: worker processes per DataLoader

    Returns:
        (trainloader, valloader): DataLoaders for the two splits.
    """
    # Normalise pixel values from [0, 255] into [-1, 1].
    normalise = transforms.Compose(
        [transforms.Lambda(lambda img: (img / 127.5) - 1.0)])
    loaders = []
    for samples in (trainset, valset):
        dataset = TripletDataset(dataroot, samples, normalise)
        loaders.append(DataLoader(dataset,
                                  batch_size=batch_size,
                                  shuffle=shuffle,
                                  num_workers=num_workers))
    return tuple(loaders)
class NetworkNvidia(nn.Module):
"""NVIDIA model used in the paper."""
def __init__(self):
"""The NVIDIA architecture.
Data preprocessing and image normalisation
Convolution: 5x5, filter: 24, strides: 2x2, activation: ELU
The 5 convolution layers are for feature extraction.
The fully connected layers are predict the steering angless
"""
super(NetworkNvidia, self).__init__()
self.conv_layers = nn.Sequential(
# convolution layer 1
nn.Conv2d(3, 24, 5, stride=2),
nn.ELU(),
# convolution layer 2
nn.Conv2d(24, 36, 5, stride=2),
nn.ELU(),
# convolution layer 3
nn.Conv2d(36, 48, 5, stride=2),
nn.ELU(),
# convolution layer 4
nn.Conv2d(48, 64, 3),
nn.ELU(),
# convolution layer 5
nn.Conv2d(64, 64, 3),
nn.Dropout(0.5)
)
self.linear_layers = nn.Sequential(
# fully connected layer 1
nn.Linear(in_features=64 * 2 * 425, out_features=150),
nn.ELU(),
# fully connected layer 2
nn.Linear(in_features=150, out_features=80),
nn.ELU(),
# fully connected layer 3
nn.Linear(in_features=80, out_features=10),
# fully connected layer 4
nn.Linear(in_features=10, out_features=1)
)
def forward(self, input):
"""Forward propogation"""
# change the tensor shape
input = input.view(input.size(0), 3, 196, 455)
# pass the input to the 5 convolution layers
output = self.conv_layers(input)
# reshape the features to pass it into activation layers
output = output.view(output.size(0), -1)
# pass the feature vectors to the fully connected layers
output = self.linear_layers(output)
return output
class Inference(object):
    """Run the trained model over the validation loader and render a video.

    For each validation frame the predicted and true steering angles are
    printed, drawn onto the source image, and the annotated frames are
    written out as ``imitation.avi`` (DIVX, 15 fps).
    """
    def __init__(self,model,device,criterion,optimizer,validationloader,valset):
        # model: trained steering network; criterion/optimizer are stored but
        # only optimizer.zero_grad() is used during inference.
        super(Inference, self).__init__()
        self.model = model
        self.device = device
        self.criterion = criterion
        self.optimizer = optimizer
        self.validationloader = validationloader
        self.sum = 0
        # valset rows are [image_path, steering_angle]; used to recover the
        # source image path for annotation.
        self.valset = valset
        # Accumulates annotated frames for the output video.
        self.imgList = []
    def test(self):
        """ inference"""
        self.model.to(self.device)
        self.model.eval()
        with torch.set_grad_enabled(False):
            for local_batch, (centers) in enumerate(self.validationloader):
                # Transfer to GPU
                centers = toDevice(centers, self.device)
                # Model computations
                self.optimizer.zero_grad()
                datas = [centers]
                # NOTE(review): the loop variable `data` shadows the
                # `torch.utils.data` module imported at file scope.
                for data in datas:
                    imgs, angles = data
                    # prediction from the model
                    outputs = self.model(imgs)
                    print ("steering_angle= ", outputs.tolist()[0], "true_angle = ", angles[0])
                    print ("deviation =", angles[0]-outputs.tolist()[0][0])
                    print ()
                    #print(self.valset[local_batch][0])
                    # Resolve the source image path for this batch index.
                    file = glob.glob(self.valset[local_batch][0])
                    test_img = cv2.imread(file[0])
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    # text origin for the true-angle label
                    org = (10, 50)
                    # fontScale
                    fontScale = 0.5
                    # Blue color in BGR
                    color = (255, 0, 0)
                    # Line thickness of 1 px
                    thickness = 1
                    label = "True_steering_angle= "+ str(round(abs(angles[0].item()),4))
                    test_img = cv2.putText(test_img, label, org, font,
                                fontScale, color, thickness, cv2.LINE_AA)
                    model_res = "Predicted_steering_angle= "+ str(round(abs(outputs.tolist()[0][0]),4))
                    test_img = cv2.putText(test_img, model_res, (10,30), font,
                                fontScale, (0,255,0), thickness, cv2.LINE_AA)
                    h,w,_ = test_img.shape
                    size = (w,h)
                    self.imgList.append(test_img)
        # NOTE(review): `size` is only defined inside the loop above — if the
        # validation set is empty this raises NameError. Assumes at least one
        # frame was processed.
        out =cv2.VideoWriter("imitation.avi",cv2.VideoWriter_fourcc(*'DIVX'),15,size)
        for i in range(len(self.imgList)):
            out.write(self.imgList[i])
        out.release()
def main():
    """Entry point: load the test list, restore the trained model, run inference."""
    print(torch.__version__)
    print(torch.cuda.device_count())
    print(torch.cuda.is_available())
    ## Load the data you want to test
    data_path = "test.txt"
    dataroot = []
    steers = []
    ## data loading: each line is "<image_path> <steering_angle>"; a header
    ## row starting with "center" is skipped.
    with open(data_path) as file:
        for line in file:
            if line.split(',')[0]=="center":continue
            dataroot.append('testset/' + line.split(' ')[0])
            steers.append(line.split(' ')[1].strip())
    # Model hyperparameters
    lr = 1e-5
    weight_decay = 1e-5
    batch_size = 1
    num_workers = 8
    test_size = 0.01
    shuffle = False
    # Load the data in form of Tensors
    trainset, valset = load_data(dataroot,steers, test_size)
    _, validationloader = data_loader(dataroot,
                                      trainset, valset,
                                      batch_size,
                                      shuffle,
                                      num_workers)
    # Call the network
    print("Imitation model initialisation")
    model = NetworkNvidia()
    print("==> Initialize model done ...")
    # Define optimizer and criterion
    optimizer = optim.Adam(model.parameters(),
                           lr=lr,
                           weight_decay=weight_decay)
    # Define the loss function
    criterion = nn.MSELoss()
    # Use Gpu if available..else run CPU
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print("Accesing you device CPU/GPU?(cuda)",device)
    # load the trained model
    # NOTE(review): this re-binds `model`, discarding the freshly constructed
    # NetworkNvidia above; torch.load of a whole-model checkpoint still needs
    # the class definition importable at load time.
    model = torch.load("imitation_model/real.ckpt")
    # Test the model output
    infer = Inference(model,
                      device,
                      criterion,
                      optimizer,
                      validationloader,valset)
    infer.test()
if __name__ == "__main__":
    main()
| [
"torch.nn.Dropout",
"cv2.VideoWriter_fourcc",
"torch.cuda.device_count",
"glob.glob",
"cv2.cv2.cvtColor",
"pandas.DataFrame",
"torch.nn.MSELoss",
"torch.utils.data.DataLoader",
"cv2.cvtColor",
"torch.load",
"torchvision.transforms.Lambda",
"torch.nn.Linear",
"torch.nn.Conv2d",
"torch.nn.EL... | [((894, 909), 'cv2.imread', 'cv2.imread', (['img'], {}), '(img)\n', (904, 909), False, 'import cv2\n'), ((1084, 1120), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2HSV'], {}), '(img, cv2.COLOR_RGB2HSV)\n', (1096, 1120), False, 'import cv2\n'), ((1132, 1159), 'numpy.random.uniform', 'np.random.uniform', (['(0.5)', '(1.0)'], {}), '(0.5, 1.0)\n', (1149, 1159), True, 'import numpy as np\n'), ((1268, 1308), 'cv2.cv2.cvtColor', 'cv2.cv2.cvtColor', (['hsv', 'cv2.COLOR_HSV2RGB'], {}), '(hsv, cv2.COLOR_HSV2RGB)\n', (1284, 1308), False, 'import cv2\n'), ((1502, 1557), 'pandas.DataFrame', 'pd.DataFrame', (["{'center': img_paths, 'steering': steers}"], {}), "({'center': img_paths, 'steering': steers})\n", (1514, 1557), True, 'import pandas as pd\n'), ((2908, 3001), 'torch.utils.data.DataLoader', 'DataLoader', (['training_set'], {'batch_size': 'batch_size', 'shuffle': 'shuffle', 'num_workers': 'num_workers'}), '(training_set, batch_size=batch_size, shuffle=shuffle,\n num_workers=num_workers)\n', (2918, 3001), False, 'from torch.utils.data import DataLoader\n'), ((3173, 3268), 'torch.utils.data.DataLoader', 'DataLoader', (['validation_set'], {'batch_size': 'batch_size', 'shuffle': 'shuffle', 'num_workers': 'num_workers'}), '(validation_set, batch_size=batch_size, shuffle=shuffle,\n num_workers=num_workers)\n', (3183, 3268), False, 'from torch.utils.data import DataLoader\n'), ((9186, 9198), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (9196, 9198), True, 'import torch.nn as nn\n'), ((9411, 9450), 'torch.load', 'torch.load', (['"""imitation_model/real.ckpt"""'], {}), "('imitation_model/real.ckpt')\n", (9421, 9450), False, 'import torch\n'), ((7894, 7919), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (7917, 7919), False, 'import torch\n'), ((7931, 7956), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7954, 7956), False, 'import torch\n'), ((2725, 2769), 'torchvision.transforms.Lambda', 
'transforms.Lambda', (['(lambda x: x / 127.5 - 1.0)'], {}), '(lambda x: x / 127.5 - 1.0)\n', (2742, 2769), True, 'import torchvision.transforms as transforms\n'), ((3929, 3958), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(24)', '(5)'], {'stride': '(2)'}), '(3, 24, 5, stride=2)\n', (3938, 3958), True, 'import torch.nn as nn\n'), ((3972, 3980), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (3978, 3980), True, 'import torch.nn as nn\n'), ((4028, 4058), 'torch.nn.Conv2d', 'nn.Conv2d', (['(24)', '(36)', '(5)'], {'stride': '(2)'}), '(24, 36, 5, stride=2)\n', (4037, 4058), True, 'import torch.nn as nn\n'), ((4072, 4080), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (4078, 4080), True, 'import torch.nn as nn\n'), ((4128, 4158), 'torch.nn.Conv2d', 'nn.Conv2d', (['(36)', '(48)', '(5)'], {'stride': '(2)'}), '(36, 48, 5, stride=2)\n', (4137, 4158), True, 'import torch.nn as nn\n'), ((4172, 4180), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (4178, 4180), True, 'import torch.nn as nn\n'), ((4228, 4248), 'torch.nn.Conv2d', 'nn.Conv2d', (['(48)', '(64)', '(3)'], {}), '(48, 64, 3)\n', (4237, 4248), True, 'import torch.nn as nn\n'), ((4262, 4270), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (4268, 4270), True, 'import torch.nn as nn\n'), ((4318, 4338), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)', '(3)'], {}), '(64, 64, 3)\n', (4327, 4338), True, 'import torch.nn as nn\n'), ((4352, 4367), 'torch.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (4362, 4367), True, 'import torch.nn as nn\n'), ((4472, 4525), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(64 * 2 * 425)', 'out_features': '(150)'}), '(in_features=64 * 2 * 425, out_features=150)\n', (4481, 4525), True, 'import torch.nn as nn\n'), ((4539, 4547), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (4545, 4547), True, 'import torch.nn as nn\n'), ((4599, 4642), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(150)', 'out_features': '(80)'}), '(in_features=150, out_features=80)\n', (4608, 4642), True, 'import 
torch.nn as nn\n'), ((4656, 4664), 'torch.nn.ELU', 'nn.ELU', ([], {}), '()\n', (4662, 4664), True, 'import torch.nn as nn\n'), ((4716, 4758), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(80)', 'out_features': '(10)'}), '(in_features=80, out_features=10)\n', (4725, 4758), True, 'import torch.nn as nn\n'), ((4810, 4851), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(10)', 'out_features': '(1)'}), '(in_features=10, out_features=1)\n', (4819, 4851), True, 'import torch.nn as nn\n'), ((5896, 5925), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (5918, 5925), False, 'import torch\n'), ((9276, 9301), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9299, 9301), False, 'import torch\n'), ((6657, 6695), 'glob.glob', 'glob.glob', (['self.valset[local_batch][0]'], {}), '(self.valset[local_batch][0])\n', (6666, 6695), False, 'import glob\n'), ((6723, 6742), 'cv2.imread', 'cv2.imread', (['file[0]'], {}), '(file[0])\n', (6733, 6742), False, 'import cv2\n'), ((7166, 7252), 'cv2.putText', 'cv2.putText', (['test_img', 'label', 'org', 'font', 'fontScale', 'color', 'thickness', 'cv2.LINE_AA'], {}), '(test_img, label, org, font, fontScale, color, thickness, cv2.\n LINE_AA)\n', (7177, 7252), False, 'import cv2\n'), ((7398, 7498), 'cv2.putText', 'cv2.putText', (['test_img', 'model_res', '(10, 30)', 'font', 'fontScale', '(0, 255, 0)', 'thickness', 'cv2.LINE_AA'], {}), '(test_img, model_res, (10, 30), font, fontScale, (0, 255, 0),\n thickness, cv2.LINE_AA)\n', (7409, 7498), False, 'import cv2\n'), ((7678, 7709), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'DIVX'"], {}), "(*'DIVX')\n", (7700, 7709), False, 'import cv2\n')] |
import numpy as np
def soda_strategy_discount(n_energy, n_nosugar):
    """Return +0.2 when energy sodas strictly outsell no-sugar ones, else -0.2."""
    return 0.2 if n_energy > n_nosugar else -0.2
def soda_strategy_nodiscount(n_energy, n_nosugar):
    """Baseline strategy: never apply a discount (both arguments ignored)."""
    return 0.0
def soda_strategy_param(n_energy, n_nosugar, T, V):
    """Tiered discount keyed on the sales gap between the two sodas.

    ``T`` holds three thresholds and ``V`` the discount magnitude for each
    band; the sign of the discount follows whichever soda sells more.
    A gap at or below ``T[0]`` yields no discount.
    """
    assert len(T) == len(V)
    gap = abs(n_energy - n_nosugar)
    direction = np.sign(n_energy - n_nosugar)
    # Chained guards reproduce the original half-open bands exactly,
    # including the gap == T[0] case, which maps to no discount.
    if gap <= T[0]:
        return 0
    if gap <= T[1]:
        return direction * V[0]
    if gap <= T[2]:
        return direction * V[1]
    return direction * V[2]
| [
"numpy.sign"
] | [((401, 430), 'numpy.sign', 'np.sign', (['(n_energy - n_nosugar)'], {}), '(n_energy - n_nosugar)\n', (408, 430), True, 'import numpy as np\n')] |
"""
Summary
-------
Simulate expected revenue for a hotel.
"""
import numpy as np
from base import Model, Problem
class Hotel(Model):
    """
    A model that simulates business of a hotel with Poisson arrival rate.
    Attributes
    ----------
    name : string
        name of model
    n_rngs : int
        number of random-number generators used to run a simulation replication
    n_responses : int
        number of responses (performance measures)
    factors : dict
        changeable factors of the simulation model
    specifications : dict
        details of each factor (for GUI and data validation)
    check_factor_list : dict
        switch case for checking factor simulatability
    Arguments
    ---------
    fixed_factors : nested dict
        fixed factors of the simulation model
    See also
    --------
    base.Model
    """
    def __init__(self, fixed_factors={}):
        self.name = "HOTEL"
        self.n_rngs = 1
        self.n_responses = 1
        self.specifications = {
            "num_products": {
                "description": "Number of products: (rate, length of stay).",
                "datatype": int,
                "default": 56
            },
            "lambda": {
                "description": "Arrival rates for each product.",
                "datatype": list,
                "default": ((1 / 168) * np.array([1, 1, 2, 2, 3, 3, 2, 2, 1, 1, .5, .5, .25, .25,
                                                   1, 1, 2, 2, 3, 3, 2, 2, 1, 1, .5, .5, 1, 1,
                                                   2, 2, 3, 3, 2, 2, 1, 1, 1, 1, 2, 2, 3, 3,
                                                   2, 2, 1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 1, 1])).tolist()
            },
            "num_rooms": {
                "description": "Hotel capacity.",
                "datatype": int,
                "default": 100
            },
            "discount_rate": {
                "description": "Discount rate.",
                "datatype": int,
                "default": 100
            },
            "rack_rate": {
                "description": "Rack rate (full price).",
                "datatype": int,
                "default": 200
            },
            "product_incidence": {
                "description": "Incidence matrix",
                "datatype": list,
                "default": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
                            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1]]
            },
            "time_limit": {
                "description": "Time after which orders of each product no longer arrive (e.g. Mon night stops at 3am Tues or t=27).",
                "datatype": list,
                "default": np.concatenate((27 * np.ones(14), 51 * np.ones(12), 75 * np.ones(10), 99 * np.ones(8), 123 * np.ones(6), 144 * np.ones(4), 168 * np.ones(2)), axis=None).tolist()
            },
            "time_before": {
                "description": "Hours before t=0 to start running (e.g. 168 means start at time -168).",
                "datatype": int,
                "default": 168
            },
            "runlength": {
                "description": "Runlength of simulation (in hours) after t=0.",
                "datatype": int,
                "default": 168
            },
            "booking_limits": {
                "description": "Booking limits.",
                "datatype": tuple,
                "default": tuple([100 for _ in range(56)])
            }
        }
        self.check_factor_list = {
            "num_products": self.check_num_products,
            "lambda": self.check_lambda,
            "num_rooms": self.check_num_rooms,
            "discount_rate": self.check_discount_rate,
            "rack_rate": self.check_rack_rate,
            "product_incidence": self.check_product_incidence,
            "time_limit": self.check_time_limit,
            "time_before": self.check_time_before,
            "runlength": self.check_runlength,
            "booking_limits": self.check_booking_limits
        }
        # Set factors of the simulation model.
        super().__init__(fixed_factors)
    def check_num_products(self):
        return self.factors["num_products"] > 0
    def check_lambda(self):
        # One strictly positive arrival rate per product.
        for i in self.factors["lambda"]:
            if i <= 0:
                return False
        return len(self.factors["lambda"]) == self.factors["num_products"]
    def check_num_rooms(self):
        return self.factors["num_rooms"] > 0
    def check_discount_rate(self):
        return self.factors["discount_rate"] > 0
    def check_rack_rate(self):
        return self.factors["rack_rate"] > 0
    def check_product_incidence(self):
        # BUG FIX: the factor is stored as a nested list, so the original
        # `self.factors["product_incidence"].shape` raised AttributeError.
        # Also, the matrix is a 0/1 incidence matrix, so requiring every
        # entry to be > 0 would reject the default matrix; and the original
        # compared m*n (resources x products) to num_products, which can
        # never hold for m > 1. Validate binary entries and one column per
        # product instead.
        A = np.array(self.factors["product_incidence"])
        if A.ndim != 2:
            return False
        if not np.all((A == 0) | (A == 1)):
            return False
        return A.shape[1] == self.factors["num_products"]
    def check_time_limit(self):
        for i in self.factors["time_limit"]:
            if i <= 0:
                return False
        return len(self.factors["time_limit"]) == self.factors["num_products"]
    def check_time_before(self):
        return self.factors["time_before"] > 0
    def check_runlength(self):
        return self.factors["runlength"] > 0
    def check_booking_limits(self):
        # BUG FIX: a booking limit of 0 (never sell that product) is a
        # legitimate value — the companion problem's lower bounds and default
        # initial solution are all zeros — so only negative limits or limits
        # above capacity are rejected (the original used `i <= 0`).
        for i in list(self.factors["booking_limits"]):
            if i < 0 or i > self.factors["num_rooms"]:
                return False
        return len(self.factors["booking_limits"]) == self.factors["num_products"]
    def replicate(self, rng_list):
        """
        Simulate a single replication for the current model factors.
        Arguments
        ---------
        rng_list : list of rng.MRG32k3a objects
            rngs for model to use when simulating a replication
        Returns
        -------
        responses : dict
            performance measures of interest
            "revenue" = expected revenue
        gradients : dict of dicts
            gradient estimates for each response
        """
        # Designate separate random number generators.
        arr_rng = rng_list[0]
        total_revenue = 0
        b = list(self.factors["booking_limits"])
        A = np.array(self.factors["product_incidence"])
        # Vector of next arrival time per product.
        # (Starts at time = -1*time_before, e.g., t = -168.)
        arrival = np.zeros(self.factors["num_products"]) - self.factors["time_before"]
        # Upper bound on number of arrivals over the time period.
        arr_bound = 10 * round(168 * np.sum(self.factors["lambda"]))
        arr_time = np.zeros((self.factors["num_products"], arr_bound))
        # Index of which arrival time to use next for each product.
        a = np.zeros(self.factors["num_products"], dtype=int)
        # Generate all interarrival times in advance.
        for i in range(self.factors["num_products"]):
            arr_time[i] = np.array([arr_rng.expovariate(self.factors["lambda"][i]) for _ in range(arr_bound)])
        # Extract first arrivals.
        for i in range(self.factors["num_products"]):
            arrival[i] = arrival[i] + arr_time[i, a[i]]
            a[i] = 1
        min_time = 0  # Keeps track of minimum time of the orders not yet received.
        while min_time <= self.factors["runlength"]:
            # Find the earliest pending order that is still allowed to arrive.
            min_time = self.factors["runlength"] + 1
            for i in range(self.factors["num_products"]):
                if ((arrival[i] < min_time) and (arrival[i] <= self.factors["time_limit"][i])):
                    min_time = arrival[i]
                    min_idx = i
            if min_time > self.factors["runlength"]:
                break
            if b[min_idx] > 0:
                # Even product indices pay the rack rate, odd ones the discount.
                if min_idx % 2 == 0:  # Rack_rate.
                    total_revenue += sum(self.factors["rack_rate"] * A[:, min_idx])
                else:  # Discount_rate.
                    total_revenue += sum(self.factors["discount_rate"] * A[:, min_idx])
                # Reduce the inventory of products sharing the same resource.
                for i in range(self.factors["num_products"]):
                    if np.dot(A[:, i].T, A[:, min_idx]) >= 1:
                        if b[i] != 0:
                            b[i] -= 1
            # Advance this product's next-arrival clock.
            arrival[min_idx] += arr_time[min_idx, a[min_idx]]
            a[min_idx] = a[min_idx] + 1
        # Compose responses and gradients.
        responses = {"revenue": total_revenue}
        gradients = {response_key: {factor_key: np.nan for factor_key in self.specifications} for response_key in responses}
        return responses, gradients
"""
Summary
-------
Maximize the expected revenue.
"""
class HotelRevenue(Problem):
    """
    Base class to implement simulation-optimization problems.
    Attributes
    ----------
    name : string
        name of problem
    dim : int
        number of decision variables
    n_objectives : int
        number of objectives
    n_stochastic_constraints : int
        number of stochastic constraints
    minmax : tuple of int (+/- 1)
        indicator of maximization (+1) or minimization (-1) for each objective
    constraint_type : string
        description of constraints types:
            "unconstrained", "box", "deterministic", "stochastic"
    variable_type : string
        description of variable types:
            "discrete", "continuous", "mixed"
    lower_bounds : tuple
        lower bound for each decision variable
    upper_bounds : tuple
        upper bound for each decision variable
    gradient_available : bool
        indicates if gradient of objective function is available
    optimal_value : float
        optimal objective function value
    optimal_solution : tuple
        optimal solution
    model : Model object
        associated simulation model that generates replications
    model_default_factors : dict
        default values for overriding model-level default factors
    model_fixed_factors : dict
        combination of overriden model-level factors and defaults
    model_decision_factors : set of str
        set of keys for factors that are decision variables
    rng_list : list of rng.MRG32k3a objects
        list of RNGs used to generate a random initial solution
        or a random problem instance
    factors : dict
        changeable factors of the problem
    initial_solution : list
        default initial solution from which solvers start
    budget : int > 0
        max number of replications (fn evals) for a solver to take
    specifications : dict
        details of each factor (for GUI, data validation, and defaults)
    Arguments
    ---------
    name : str
        user-specified name for problem
    fixed_factors : dict
        dictionary of user-specified problem factors
    model_fixed factors : dict
        subset of user-specified non-decision factors to pass through to the model
    See also
    --------
    base.Problem
    """
    def __init__(self, name="HOTEL-1", fixed_factors={}, model_fixed_factors={}):
        # Single-objective maximization of expected revenue over box-constrained,
        # discrete booking limits.
        self.name = name
        self.n_objectives = 1
        self.n_stochastic_constraints = 0
        self.minmax = (1,)
        self.constraint_type = "box"
        self.variable_type = "discrete"
        self.gradient_available = False
        self.optimal_value = None
        self.optimal_solution = None
        self.model_default_factors = {}
        # The model's "booking_limits" factor is the decision variable.
        self.model_decision_factors = {"booking_limits"}
        self.factors = fixed_factors
        self.specifications = {
            "initial_solution": {
                "description": "Initial solution.",
                "datatype": tuple,
                "default": tuple([0 for _ in range(56)])
            },
            "budget": {
                "description": "Max # of replications for a solver to take.",
                "datatype": int,
                "default": 100
            }
        }
        self.check_factor_list = {
            "initial_solution": self.check_initial_solution,
            "budget": self.check_budget
        }
        super().__init__(fixed_factors, model_fixed_factors)
        # Instantiate model with fixed factors and over-riden defaults.
        self.model = Hotel(self.model_fixed_factors)
        # Box constraints: each booking limit lies in [0, num_rooms].
        self.dim = self.model.factors["num_products"]
        self.lower_bounds = tuple(np.zeros(self.dim))
        self.upper_bounds = tuple(self.model.factors["num_rooms"] * np.ones(self.dim))
    def check_initial_solution(self):
        # Initial solution must supply one booking limit per product.
        return len(self.factors["initial_solution"]) == self.dim
    def check_budget(self):
        return self.factors["budget"] > 0
    def check_simulatable_factors(self):
        # Bounds tuples must each have one entry per decision variable.
        if len(self.lower_bounds) != self.dim:
            return False
        elif len(self.upper_bounds) != self.dim:
            return False
        else:
            return True
    def vector_to_factor_dict(self, vector):
        """
        Convert a vector of variables to a dictionary with factor keys
        Arguments
        ---------
        vector : tuple
            vector of values associated with decision variables
        Returns
        -------
        factor_dict : dictionary
            dictionary with factor keys and associated values
        """
        # The entire decision vector is the per-product booking-limit tuple.
        factor_dict = {
            "booking_limits": vector[:]
        }
        return factor_dict
    def factor_dict_to_vector(self, factor_dict):
        """
        Convert a dictionary with factor keys to a vector
        of variables.
        Arguments
        ---------
        factor_dict : dictionary
            dictionary with factor keys and associated values
        Returns
        -------
        vector : tuple
            vector of values associated with decision variables
        """
        vector = tuple(factor_dict["booking_limits"])
        return vector
    def response_dict_to_objectives(self, response_dict):
        """
        Convert a dictionary with response keys to a vector
        of objectives.
        Arguments
        ---------
        response_dict : dictionary
            dictionary with response keys and associated values
        Returns
        -------
        objectives : tuple
            vector of objectives
        """
        # The single objective is the simulated revenue.
        objectives = (response_dict["revenue"],)
        return objectives
    def response_dict_to_stoch_constraints(self, response_dict):
        """
        Convert a dictionary with response keys to a vector
        of left-hand sides of stochastic constraints: E[Y] >= 0
        Arguments
        ---------
        response_dict : dictionary
            dictionary with response keys and associated values
        Returns
        -------
        stoch_constraints : tuple
            vector of LHSs of stochastic constraint
        """
        # This problem has no stochastic constraints.
        stoch_constraints = None
        return stoch_constraints
    def deterministic_stochastic_constraints_and_gradients(self, x):
        """
        Compute deterministic components of stochastic constraints for a solution `x`.
        Arguments
        ---------
        x : tuple
            vector of decision variables
        Returns
        -------
        det_stoch_constraints : tuple
            vector of deterministic components of stochastic constraints
        det_stoch_constraints_gradients : tuple
            vector of gradients of deterministic components of stochastic constraints
        """
        det_stoch_constraints = None
        det_stoch_constraints_gradients = None
        return det_stoch_constraints, det_stoch_constraints_gradients
    def deterministic_objectives_and_gradients(self, x):
        """
        Compute deterministic components of objectives for a solution `x`.
        Arguments
        ---------
        x : tuple
            vector of decision variables
        Returns
        -------
        det_objectives : tuple
            vector of deterministic components of objectives
        det_objectives_gradients : tuple
            vector of gradients of deterministic components of objectives
        """
        # The objective has no deterministic component.
        det_objectives = (0,)
        det_objectives_gradients = ((0,) * self.dim,)
        return det_objectives, det_objectives_gradients
    def check_deterministic_constraints(self, x):
        """
        Check if a solution `x` satisfies the problem's deterministic constraints.
        Arguments
        ---------
        x : tuple
            vector of decision variables
        Returns
        -------
        satisfies : bool
            indicates if solution `x` satisfies the deterministic constraints.
        """
        # Only box constraints apply; those are enforced via the bounds.
        return True
    def get_random_solution(self, rand_sol_rng):
        """
        Generate a random solution for starting or restarting solvers.
        Arguments
        ---------
        rand_sol_rng : rng.MRG32k3a object
            random-number generator used to sample a new random solution
        Returns
        -------
        x : tuple
            vector of decision variables
        """
        # Draw each booking limit uniformly from {0, ..., num_rooms}
        # (assumes rand_sol_rng.randint is inclusive on both ends — TODO confirm).
        x = tuple([rand_sol_rng.randint(0, self.model.factors["num_rooms"]) for _ in range(self.dim)])
        return x
| [
"numpy.sum",
"numpy.zeros",
"numpy.ones",
"numpy.array",
"numpy.dot"
] | [((7494, 7537), 'numpy.array', 'np.array', (["self.factors['product_incidence']"], {}), "(self.factors['product_incidence'])\n", (7502, 7537), True, 'import numpy as np\n'), ((7891, 7942), 'numpy.zeros', 'np.zeros', (["(self.factors['num_products'], arr_bound)"], {}), "((self.factors['num_products'], arr_bound))\n", (7899, 7942), True, 'import numpy as np\n'), ((8023, 8072), 'numpy.zeros', 'np.zeros', (["self.factors['num_products']"], {'dtype': 'int'}), "(self.factors['num_products'], dtype=int)\n", (8031, 8072), True, 'import numpy as np\n'), ((7668, 7706), 'numpy.zeros', 'np.zeros', (["self.factors['num_products']"], {}), "(self.factors['num_products'])\n", (7676, 7706), True, 'import numpy as np\n'), ((13593, 13611), 'numpy.zeros', 'np.zeros', (['self.dim'], {}), '(self.dim)\n', (13601, 13611), True, 'import numpy as np\n'), ((13681, 13698), 'numpy.ones', 'np.ones', (['self.dim'], {}), '(self.dim)\n', (13688, 13698), True, 'import numpy as np\n'), ((7840, 7870), 'numpy.sum', 'np.sum', (["self.factors['lambda']"], {}), "(self.factors['lambda'])\n", (7846, 7870), True, 'import numpy as np\n'), ((9407, 9439), 'numpy.dot', 'np.dot', (['A[:, i].T', 'A[:, min_idx]'], {}), '(A[:, i].T, A[:, min_idx])\n', (9413, 9439), True, 'import numpy as np\n'), ((1363, 1563), 'numpy.array', 'np.array', (['[1, 1, 2, 2, 3, 3, 2, 2, 1, 1, 0.5, 0.5, 0.25, 0.25, 1, 1, 2, 2, 3, 3, 2, 2,\n 1, 1, 0.5, 0.5, 1, 1, 2, 2, 3, 3, 2, 2, 1, 1, 1, 1, 2, 2, 3, 3, 2, 2, 1,\n 1, 2, 2, 3, 3, 1, 1, 2, 2, 1, 1]'], {}), '([1, 1, 2, 2, 3, 3, 2, 2, 1, 1, 0.5, 0.5, 0.25, 0.25, 1, 1, 2, 2, 3,\n 3, 2, 2, 1, 1, 0.5, 0.5, 1, 1, 2, 2, 3, 3, 2, 2, 1, 1, 1, 1, 2, 2, 3, 3,\n 2, 2, 1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 1, 1])\n', (1371, 1563), True, 'import numpy as np\n'), ((3970, 3981), 'numpy.ones', 'np.ones', (['(14)'], {}), '(14)\n', (3977, 3981), True, 'import numpy as np\n'), ((3988, 3999), 'numpy.ones', 'np.ones', (['(12)'], {}), '(12)\n', (3995, 3999), True, 'import numpy as np\n'), ((4006, 4017), 'numpy.ones', 
'np.ones', (['(10)'], {}), '(10)\n', (4013, 4017), True, 'import numpy as np\n'), ((4024, 4034), 'numpy.ones', 'np.ones', (['(8)'], {}), '(8)\n', (4031, 4034), True, 'import numpy as np\n'), ((4042, 4052), 'numpy.ones', 'np.ones', (['(6)'], {}), '(6)\n', (4049, 4052), True, 'import numpy as np\n'), ((4060, 4070), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (4067, 4070), True, 'import numpy as np\n'), ((4078, 4088), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (4085, 4088), True, 'import numpy as np\n')] |
from typing import Union
import numpy as np
def manhattan(
    x: Union[list, np.array], y: Union[list, np.array]
) -> Union[float, list, np.array]:
    """Calculate the Manhattan (L1) distance between two points.

    The distance between two points measured along axes at right angles,
    i.e. the sum of absolute coordinate differences. Works for points of
    any dimensionality (the previous version only used the first two
    coordinates even though the shape check admitted n-dim input).

    Args:
        x: Point x
        y: Point y

    Returns:
        The Manhattan distance between ``x`` and ``y``.

    Raises:
        ValueError: If ``x`` and ``y`` do not have matching shapes.
    """
    if isinstance(x, list):
        x = np.array(x)
    if isinstance(y, list):
        y = np.array(y)
    # Equal shapes imply equal sizes, so the former redundant size check
    # was dropped.
    if x.shape != y.shape:
        raise ValueError(
            f"Shape or size of x and y does not matches, x shape {x.shape}, y shape {y.shape}"
        )
    # Sum of |x_i - y_i| over all coordinates generalizes the original
    # two-coordinate formula.
    return np.sum(np.abs(x - y))
def euclidean(
    x: Union[list, np.array], y: Union[list, np.array]
) -> Union[float, list, np.array]:
    """Calculate the Euclidean (L2) distance between two points.

    The straight-line distance between two points. (The previous
    docstring was a copy-paste of the Manhattan one.)

    Args:
        x: Point x
        y: Point y

    Returns:
        The Euclidean distance between ``x`` and ``y``.

    Raises:
        ValueError: If ``x`` and ``y`` do not have matching shapes.
    """
    # Mirror the list handling and shape validation done in manhattan();
    # without the conversion, plain-list inputs raised TypeError.
    if isinstance(x, list):
        x = np.array(x)
    if isinstance(y, list):
        y = np.array(y)
    if x.shape != y.shape:
        raise ValueError(
            f"Shape or size of x and y does not matches, x shape {x.shape}, y shape {y.shape}"
        )
    return np.sqrt(np.sum(np.square(x - y)))
| [
"numpy.abs",
"numpy.square",
"numpy.array"
] | [((393, 404), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (401, 404), True, 'import numpy as np\n'), ((445, 456), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (453, 456), True, 'import numpy as np\n'), ((646, 665), 'numpy.abs', 'np.abs', (['(x[0] - y[0])'], {}), '(x[0] - y[0])\n', (652, 665), True, 'import numpy as np\n'), ((668, 687), 'numpy.abs', 'np.abs', (['(x[1] - y[1])'], {}), '(x[1] - y[1])\n', (674, 687), True, 'import numpy as np\n'), ((1022, 1038), 'numpy.square', 'np.square', (['(x - y)'], {}), '(x - y)\n', (1031, 1038), True, 'import numpy as np\n')] |
from bodynavigation.advanced_segmentation import seg
import numpy as np
from loguru import logger
import h5py
import sed3
from bodynavigation.advanced_segmentation import lines
import skimage.io
import skimage
import skimage.transform
from bodynavigation.advanced_segmentation import CT_regression_tools
import matplotlib.pyplot as plt
# from bodynavigation.organ_detection import OrganDetection
def prepare_data(
    imshape=256,
    sdf_type="diaphragm_axial",
    # sdf_type='coronal',
    # sdf_type='sagittal',
    # sdf_type='surface',
    skip_h5=False,
    n_data=40,
    filename_prefix="",
):
    """Read CT scans, compute a signed-distance field per scan, and save to HDF5.

    :param imshape: edge size (pixels) of the square resized slices
    :param sdf_type: which ``ss.dist_to_<sdf_type>()`` distance field to compute
    :param skip_h5: if True, compute but do not write the HDF5 output
    :param n_data: number of scans to process (first 20 from 3Dircadb1,
        the rest from sliver07)
    :param filename_prefix: used to prevent rewriting the files during testing
    :return: None
    """
    c = 0
    for i in range(n_data):
        # Scans 0-19 come from 3Dircadb1 (1-indexed); later ones from sliver07.
        if i <= 19:
            ss, data, voxelsize = seg.read_scan("3Dircadb1", i + 1)
        else:
            ss, data, voxelsize = seg.read_scan("sliver07", i - 19)
        # Preallocating is more efficient than appending slice by slice.
        # NOTE: dtype changed from np.float (removed in NumPy 1.24) to float.
        X_train = np.empty(
            [len(data), imshape, imshape], dtype=float
        )
        for j in range(data.shape[0]):
            img = CT_regression_tools.resize(data[j], imshape)
            img = CT_regression_tools.normalize(img)
            X_train[j] = img
        # Call e.g. ss.dist_to_diaphragm_axial() via getattr instead of eval.
        Y_train = getattr(ss, f"dist_to_{sdf_type}")()
        Y_train = skimage.transform.resize(
            np.asarray(Y_train),
            [Y_train.shape[0], imshape, imshape],
            preserve_range=True,
        )
        # sed3.show_slices(np.asarray(X_train[0:50]), np.asarray(Y_train[0:50]), slice_step=10, axis=2)
        # plt.show()
        if not skip_h5:
            with h5py.File(f"{filename_prefix}sdf_{sdf_type}{imshape}.h5", "a") as h5f:
                logger.debug(f"X_train={X_train.dtype}")
                h5f.create_dataset(f"scan_{i}", data=X_train)
                h5f.create_dataset(f"label_{i}", data=Y_train)
        c += 1
        logger.info(f"Scan n.{c} saved. i={i}")
if __name__ == "__main__":
    # Run only when executed as a script; skipped when this module is imported.
    prepare_data()
| [
"bodynavigation.advanced_segmentation.CT_regression_tools.normalize",
"h5py.File",
"bodynavigation.advanced_segmentation.CT_regression_tools.resize",
"numpy.asarray",
"bodynavigation.advanced_segmentation.seg.read_scan",
"loguru.logger.info",
"loguru.logger.debug"
] | [((888, 921), 'bodynavigation.advanced_segmentation.seg.read_scan', 'seg.read_scan', (['"""3Dircadb1"""', '(i + 1)'], {}), "('3Dircadb1', i + 1)\n", (901, 921), False, 'from bodynavigation.advanced_segmentation import seg\n'), ((970, 1003), 'bodynavigation.advanced_segmentation.seg.read_scan', 'seg.read_scan', (['"""sliver07"""', '(i - 19)'], {}), "('sliver07', i - 19)\n", (983, 1003), False, 'from bodynavigation.advanced_segmentation import seg\n'), ((1212, 1256), 'bodynavigation.advanced_segmentation.CT_regression_tools.resize', 'CT_regression_tools.resize', (['data[j]', 'imshape'], {}), '(data[j], imshape)\n', (1238, 1256), False, 'from bodynavigation.advanced_segmentation import CT_regression_tools\n'), ((1275, 1309), 'bodynavigation.advanced_segmentation.CT_regression_tools.normalize', 'CT_regression_tools.normalize', (['img'], {}), '(img)\n', (1304, 1309), False, 'from bodynavigation.advanced_segmentation import CT_regression_tools\n'), ((1447, 1466), 'numpy.asarray', 'np.asarray', (['Y_train'], {}), '(Y_train)\n', (1457, 1466), True, 'import numpy as np\n'), ((2013, 2052), 'loguru.logger.info', 'logger.info', (['f"""Scan n.{c} saved. i={i}"""'], {}), "(f'Scan n.{c} saved. i={i}')\n", (2024, 2052), False, 'from loguru import logger\n'), ((1729, 1791), 'h5py.File', 'h5py.File', (['f"""{filename_prefix}sdf_{sdf_type}{imshape}.h5"""', '"""a"""'], {}), "(f'{filename_prefix}sdf_{sdf_type}{imshape}.h5', 'a')\n", (1738, 1791), False, 'import h5py\n'), ((1816, 1856), 'loguru.logger.debug', 'logger.debug', (['f"""X_train={X_train.dtype}"""'], {}), "(f'X_train={X_train.dtype}')\n", (1828, 1856), False, 'from loguru import logger\n')] |
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Executor for TensorFlow Transform."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import apache_beam as beam
import numpy as np
import six
import tensorflow as tf
import tensorflow_data_validation as tfdv
from tensorflow_data_validation.utils import batch_util
import tensorflow_transform as tft
from tensorflow_transform import impl_helper
import tensorflow_transform.beam as tft_beam
from tensorflow_transform.beam import analyzer_cache
from tensorflow_transform.beam import common as tft_beam_common
from tensorflow_transform.saved import saved_transform_io
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import dataset_schema
from tensorflow_transform.tf_metadata import metadata_io
from tensorflow_transform.tf_metadata import schema_utils
from typing import Any, Dict, Generator, List, Mapping, Sequence, Text, Tuple, Union, Optional
# pylint: disable=g-direct-tensorflow-import
from tensorflow.core.example import example_pb2
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_metadata.proto.v0 import statistics_pb2
# pylint: enable=g-direct-tensorflow-import
from tfx import types
from tfx.components.base import base_executor
from tfx.components.transform import common
from tfx.components.transform import labels
from tfx.components.transform import messages
from tfx.types import artifact_utils
from tfx.utils import import_utils
from tfx.utils import io_utils
# Feature key under which raw (serialized) example bytes are stored when the
# input data is not decoded.
RAW_EXAMPLE_KEY = 'raw_example'
# Schema to use if the input data should be decoded as raw example.
_RAW_EXAMPLE_SCHEMA = dataset_schema.from_feature_spec(
    {RAW_EXAMPLE_KEY: tf.FixedLenFeature([], tf.string)})
# Synthetic feature used to tunnel each record's key through the pipeline.
# TODO(b/123519698): Simplify the code by removing the key structure.
_TRANSFORM_INTERNAL_FEATURE_FOR_KEY = '__TFT_PASS_KEY__'
# Default file name prefix for transformed_examples.
_DEFAULT_TRANSFORMED_EXAMPLES_PREFIX = 'transformed_examples'
# Temporary path inside transform_output used for tft.beam
# TODO(b/125451545): Provide a safe temp path from base executor instead.
_TEMP_DIR_IN_TRANSFORM_OUTPUT = '.temp_path'
# TODO(b/122478841): Move it to a common place that is shared across components.
class _Status(object):
"""Status that reports success or error status of an execution."""
def __init__(self, is_error, error_message=None):
self._is_error = is_error
self._error_message = error_message
@classmethod
def OK(cls):
"""Returns an ok Status."""
return _Status(False)
@classmethod
def Error(cls, error_message):
"""Returns an error Status with error message."""
return _Status(True, error_message)
@property
def error_message(self):
return self._error_message
class _Dataset(object):
  """Dataset to be analyzed and/or transformed.

  It also contains bundle of stages of a single dataset through the transform
  pipeline.
  """

  # Number of trailing path components kept by `file_pattern_suffix`.
  _FILE_PATTERN_SUFFIX_LENGTH = 6

  def __init__(self, file_pattern: Text, file_format: Text, data_format: Text,
               metadata: dataset_metadata.DatasetMetadata):
    """Initialize a Dataset.

    Args:
      file_pattern: The file pattern of the dataset.
      file_format: The file format of the dataset.
      data_format: The data format of the dataset.
      metadata: A DatasetMetadata object describing the dataset.
    """
    self._file_pattern = file_pattern
    self._file_format = file_format
    self._data_format = data_format
    self._metadata = metadata

  @property
  def file_pattern(self):
    return self._file_pattern

  @property
  def file_pattern_suffix(self):
    # Keep only the last few path components so the suffix is short enough
    # to act as an identifier for this dataset.
    components = self._file_pattern.split(os.sep)
    return os.path.join(*components[-self._FILE_PATTERN_SUFFIX_LENGTH:])

  @property
  def data_format(self):
    return self._data_format

  @property
  def file_format(self):
    return self._file_format

  @property
  def metadata(self):
    return self._metadata

  @property
  def encoded(self):
    return self._encoded

  @encoded.setter
  def encoded(self, val):
    self._encoded = val

  @property
  def decoded(self):
    return self._decoded

  @decoded.setter
  def decoded(self, val):
    self._decoded = val

  @property
  def transformed(self):
    return self._transformed

  @transformed.setter
  def transformed(self, val):
    self._transformed = val

  # TODO(b/65115913): Remove this and the setter and instead chain the
  # "encoding" only to the "Materialize" parts of the computation, just
  # before (or within) _WriteExamples.
  @property
  def transformed_and_encoded(self):
    return self._transformed_and_encoded

  @transformed_and_encoded.setter
  def transformed_and_encoded(self, val):
    self._transformed_and_encoded = val
def _GetSchemaProto(
    metadata: dataset_metadata.DatasetMetadata) -> schema_pb2.Schema:
  """Gets the schema proto associated with a DatasetMetadata.

  This is needed because tensorflow_transform 0.13 and tensorflow_transform
  0.14 have a different API for DatasetMetadata.

  Args:
    metadata: A dataset_metadata.DatasetMetadata.

  Returns:
    A schema_pb2.Schema.
  """
  # `metadata.schema` is either already a Schema proto (TFT 0.14) or a
  # dataset_schema.Schema wrapper (TFT 0.13) that carries the proto in its
  # `_schema_proto` attribute.
  wrapped = metadata.schema
  return getattr(wrapped, '_schema_proto', wrapped)
class Executor(base_executor.BaseExecutor):
"""Transform executor."""
  def Do(self, input_dict: Dict[Text, List[types.Artifact]],
         output_dict: Dict[Text, List[types.Artifact]],
         exec_properties: Dict[Text, Any]) -> None:
    """TensorFlow Transform executor entrypoint.
    This implements BaseExecutor.Do() and is invoked by orchestration systems.
    This is not intended for manual usage or further customization. Please use
    the Transform() function which takes an input format with no artifact
    dependency.
    Args:
      input_dict: Input dict from input key to a list of artifacts, including:
        - input_data: A list of 'ExamplesPath' type which should contain two
          splits 'train' and 'eval'.
        - schema: A list of 'SchemaPath' type which should contain a single
          schema artifact.
      output_dict: Output dict from key to a list of artifacts, including:
        - transform_output: Output of 'tf.Transform', which includes an exported
          Tensorflow graph suitable for both training and serving;
        - transformed_examples: Materialized transformed examples, which
          includes both 'train' and 'eval' splits.
      exec_properties: A dict of execution properties, including either one of:
        - module_file: The file path to a python module file, from which the
          'preprocessing_fn' function will be loaded.
        - preprocessing_fn: The module path to a python function that
          implements 'preprocessing_fn'.
    Returns:
      None
    """
    self._log_startup(input_dict, output_dict, exec_properties)
    # Resolve the artifact URIs for the train/eval input splits and outputs.
    train_data_uri = artifact_utils.get_split_uri(input_dict['input_data'],
                                                  'train')
    eval_data_uri = artifact_utils.get_split_uri(input_dict['input_data'],
                                                 'eval')
    schema_file = io_utils.get_only_uri_in_dir(
        artifact_utils.get_single_uri(input_dict['schema']))
    transform_output = artifact_utils.get_single_uri(
        output_dict['transform_output'])
    transformed_train_output = artifact_utils.get_split_uri(
        output_dict['transformed_examples'], 'train')
    transformed_eval_output = artifact_utils.get_split_uri(
        output_dict['transformed_examples'], 'eval')
    # tft.beam needs a scratch directory; it lives inside the transform
    # output and is deleted on success below.
    temp_path = os.path.join(transform_output, _TEMP_DIR_IN_TRANSFORM_OUTPUT)
    tf.logging.debug('Using temp path %s for tft.beam', temp_path)
    def _GetCachePath(label, params_dict):
      # Cache artifacts are optional; return None when absent.
      if label not in params_dict:
        return None
      else:
        return artifact_utils.get_single_uri(params_dict[label])
    # Translate artifact URIs and exec properties into the label-keyed
    # input/output dicts expected by Transform().
    label_inputs = {
        labels.COMPUTE_STATISTICS_LABEL:
            False,
        labels.SCHEMA_PATH_LABEL:
            schema_file,
        labels.EXAMPLES_DATA_FORMAT_LABEL:
            labels.FORMAT_TF_EXAMPLE,
        labels.ANALYZE_AND_TRANSFORM_DATA_PATHS_LABEL:
            io_utils.all_files_pattern(train_data_uri),
        labels.TRANSFORM_ONLY_DATA_PATHS_LABEL:
            io_utils.all_files_pattern(eval_data_uri),
        labels.TFT_STATISTICS_USE_TFDV_LABEL:
            True,
        labels.MODULE_FILE:
            exec_properties.get('module_file', None),
        labels.PREPROCESSING_FN:
            exec_properties.get('preprocessing_fn', None),
    }
    cache_input = _GetCachePath('cache_input_path', input_dict)
    if cache_input is not None:
      label_inputs[labels.CACHE_INPUT_PATH_LABEL] = cache_input
    label_outputs = {
        labels.TRANSFORM_METADATA_OUTPUT_PATH_LABEL: transform_output,
        labels.TRANSFORM_MATERIALIZE_OUTPUT_PATHS_LABEL: [
            os.path.join(transformed_train_output,
                         _DEFAULT_TRANSFORMED_EXAMPLES_PREFIX),
            os.path.join(transformed_eval_output,
                         _DEFAULT_TRANSFORMED_EXAMPLES_PREFIX),
        ],
        labels.TEMP_OUTPUT_LABEL: str(temp_path),
    }
    cache_output = _GetCachePath('cache_output_path', output_dict)
    if cache_output is not None:
      label_outputs[labels.CACHE_OUTPUT_PATH_LABEL] = cache_output
    status_file = 'status_file'  # Unused
    self.Transform(label_inputs, label_outputs, status_file)
    # Only reached on success; failures keep temp_path for debugging.
    tf.logging.info('Cleaning up temp path %s on executor success', temp_path)
    io_utils.delete_dir(temp_path)
  @staticmethod
  @beam.ptransform_fn
  @beam.typehints.with_input_types(beam.Pipeline)
  @beam.typehints.with_output_types(beam.pvalue.PDone)
  def _IncrementColumnUsageCounter(pipeline: beam.Pipeline,
                                   total_columns_count: int,
                                   analyze_columns_count: int,
                                   transform_columns_count: int):
    """A beam PTransform to increment counters of column usage."""
    def _MakeAndIncrementCounters(_):
      """Increment column usage counters."""
      beam.metrics.Metrics.counter(
          tft_beam_common.METRICS_NAMESPACE,
          'total_columns_count').inc(total_columns_count)
      beam.metrics.Metrics.counter(
          tft_beam_common.METRICS_NAMESPACE,
          'analyze_columns_count').inc(analyze_columns_count)
      beam.metrics.Metrics.counter(
          tft_beam_common.METRICS_NAMESPACE,
          'transform_columns_count').inc(transform_columns_count)
      return None
    # A single-element PCollection drives the Map so the counters are
    # incremented exactly once per pipeline run.
    return (
        pipeline
        | 'CreateNone' >> beam.Create([None])
        | 'IncrementColumnUsageCounter' >> beam.Map(_MakeAndIncrementCounters))
  @staticmethod
  @beam.ptransform_fn
  @beam.typehints.with_input_types(beam.Pipeline)
  # TODO(b/122478841): Obviate the bytes (key part).
  @beam.typehints.with_output_types(
      beam.typehints.KV[bytes, beam.typehints.Union[bytes, example_pb2.Example]]
  )
  def _ReadExamples(pipeline: beam.Pipeline,
                    dataset: _Dataset) -> beam.pvalue.PCollection:
    """Reads examples from the given `dataset`.
    Args:
      pipeline: beam pipeline.
      dataset: A `_Dataset` object that represents the data to read.
    Returns:
      A PCollection containing KV pairs of examples.
    """
    # Records get a None key here; the key slot is used downstream to pass
    # per-record keys through the transform (see _DecodeInputs).
    result = (
        pipeline
        | 'Read' >> beam.io.ReadFromTFRecord(
            dataset.file_pattern,
            coder=beam.coders.BytesCoder(),
            # TODO(b/114938612): Eventually remove this override.
            validate=False)
        | 'AddKey' >> beam.Map(lambda x: (None, x)))
    # Only parse into Example protos when the dataset is tf.Example format;
    # otherwise the raw bytes are passed through.
    if dataset.data_format == labels.FORMAT_TF_EXAMPLE:
      result |= (
          'ParseExamples' >>
          beam.Map(lambda kv: (kv[0], example_pb2.Example.FromString(kv[1]))))
    # TODO(b/122478841): Figure out telemetry in beam.
    return result
  @staticmethod
  @beam.ptransform_fn
  @beam.typehints.with_input_types(
      beam.typehints.KV[bytes, example_pb2.Example])
  @beam.typehints.with_output_types(beam.pvalue.PDone)
  def _WriteExamples(pcollection: beam.pvalue.PCollection,
                     unused_file_format: Any,
                     transformed_example_path: Text) -> beam.pvalue.PDone:
    """Writes transformed examples compressed in gzip format.
    Args:
      pcollection: PCollection of transformed examples.
      unused_file_format: file format, unused.
      transformed_example_path: path to write to.
    Returns:
      beam.pvalue.PDone.
    """
    # Drop the KV keys; only the Example values are materialized. The .gz
    # suffix makes WriteToTFRecord emit gzip-compressed shards.
    return (pcollection
            | 'DropNoneKeys' >> beam.Values()
            | 'Write' >> beam.io.WriteToTFRecord(
                transformed_example_path,
                file_name_suffix='.gz',
                coder=beam.coders.ProtoCoder(example_pb2.Example)))
def _GetSchema(self, schema_path: Text) -> schema_pb2.Schema:
"""Gets a tf.metadata schema.
Args:
schema_path: Path to schema file.
Returns:
A tf.metadata schema.
"""
schema_reader = io_utils.SchemaReader()
return schema_reader.read(schema_path)
  def _ReadMetadata(self, data_format: Text,
                    schema_path: Text) -> dataset_metadata.DatasetMetadata:
    """Returns a dataset_metadata.DatasetMetadata for the input data.
    Args:
      data_format: name of the input data format.
      schema_path: path to schema file.
    Returns:
      A dataset_metadata.DatasetMetadata representing the provided set of
          columns.
    """
    # Raw-example mode bypasses the user schema and uses the single
    # bytes-feature schema defined at module level.
    if self._ShouldDecodeAsRawExample(data_format):
      return dataset_metadata.DatasetMetadata(_RAW_EXAMPLE_SCHEMA)
    schema_proto = self._GetSchema(schema_path)
    # For compatibility with tensorflow_transform 0.13 and 0.14, we create and
    # then update a DatasetMetadata.
    result = dataset_metadata.DatasetMetadata(dataset_schema.Schema({}))
    _GetSchemaProto(result).CopyFrom(schema_proto)
    return result
  @staticmethod
  @beam.ptransform_fn
  @beam.typehints.with_input_types(
      beam.typehints.Dict[str, beam.typehints.Any])  # TFDV format.
  @beam.typehints.with_output_types(beam.pvalue.PDone)
  def _GenerateStats(
      pcollection: beam.pvalue.PCollection,
      stats_output_path: Text,
      schema: schema_pb2.Schema,
      use_tfdv=True,
      use_deep_copy_optimization=False  # pylint: disable=unused-argument
  ) -> beam.pvalue.PDone:
    """Generates statistics.
    Args:
      pcollection: PCollection of examples.
      stats_output_path: path where statistics is written to.
      schema: schema.
      use_tfdv: whether use TFDV for computing statistics.
      use_deep_copy_optimization: whether use deep copy optimization.
    Returns:
      beam.pvalue.PDone.
    """
    # TFDV is currently the only supported statistics backend.
    # NOTE(review): the error message's 'althernatives' typo is preserved;
    # fixing it would change runtime output.
    if not use_tfdv:
      raise ValueError(
          'TFDV is not used for stats. Please provide althernatives.')
    # pylint: disable=no-value-for-parameter
    return (pcollection
            | 'ComputeTFDVStats' >> Executor._ComputeTFDVStats(schema)
            | 'WriteStats' >> Executor._WriteStats(stats_output_path))
  @staticmethod
  @beam.ptransform_fn
  @beam.typehints.with_input_types(beam.typehints.Dict[str, beam.typehints.Any])
  @beam.typehints.with_output_types(statistics_pb2.DatasetFeatureStatisticsList)
  def _ComputeTFDVStats(pcollection: beam.pvalue.PCollection,
                        schema: schema_pb2.Schema) -> beam.pvalue.PCollection:
    """Computes Statistics with TFDV.
    Args:
      pcollection: pcollection of examples.
      schema: schema.
    Returns:
      PCollection of `DatasetFeatureStatisticsList`.
    """
    feature_specs_from_schema = schema_utils.schema_as_feature_spec(
        schema).feature_spec
    def EncodeTFDV(element, feature_specs):
      """Encodes element in an in-memory format that TFDV expects."""
      if _TRANSFORM_INTERNAL_FEATURE_FOR_KEY not in element:
        raise ValueError(
            'Expected _TRANSFORM_INTERNAL_FEATURE_FOR_KEY ({}) to exist in the '
            'input but not found.'.format(_TRANSFORM_INTERNAL_FEATURE_FOR_KEY))
      # TODO(b/123549935): Obviate the numpy array conversions by
      # allowing TFDV to accept primitives in general, and TFT's
      # input/output format in particular.
      result = {}
      for feature_name, feature_spec in six.iteritems(feature_specs):
        feature_value = element.get(feature_name)
        if feature_value is None:
          result[feature_name] = None
        elif isinstance(feature_value, (np.ndarray, list)):
          # Already a sequence: convert in place to the spec's dtype.
          result[feature_name] = np.asarray(
              feature_value, feature_spec.dtype.as_numpy_dtype)
        else:
          # Scalar: wrap in a length-1 array so TFDV sees a uniform shape.
          result[feature_name] = np.asarray(
              [feature_value], dtype=feature_spec.dtype.as_numpy_dtype)
      return result
    result = (pcollection
              # TODO(kestert): Remove encoding and batching steps once TFT
              # supports Arrow tables.
              | 'EncodeTFDV' >> beam.Map(
                  EncodeTFDV, feature_specs=feature_specs_from_schema))
    # TODO(pachristopher): Remove this once TFDV 0.14 is released.
    # TFDV >= 0.14 requires examples to be batched into Arrow tables first.
    (major, minor, _) = tfdv.__version__.split('.')
    if int(major) > 0 or int(minor) >= 14:
      result |= ('BatchExamplesToArrowTables' >>
                 batch_util.BatchExamplesToArrowTables())
    return (result
            | 'ComputeFeatureStatisticsTFDV' >> tfdv.GenerateStatistics(
                tfdv.StatsOptions(schema=schema)))
  @staticmethod
  @beam.ptransform_fn
  @beam.typehints.with_input_types(statistics_pb2.DatasetFeatureStatisticsList)
  @beam.typehints.with_output_types(beam.pvalue.PDone)
  def _WriteStats(pcollection_stats: beam.pvalue.PCollection,
                  stats_output_path: Text) -> beam.pvalue.PDone:
    """Writes Statistics outputs.
    Args:
      pcollection_stats: pcollection of statistics.
      stats_output_path: path to write statistics.
    Returns:
      beam.pvalue.PDone.
    """
    # TODO(b/68765333): Investigate if this can be avoided.
    tf.gfile.MakeDirs(os.path.dirname(stats_output_path))
    # TODO(b/117601471): Replace with utility method to write stats.
    # The serialized proto is written as a single unsharded file with no
    # trailing newline so it can be read back verbatim.
    return (pcollection_stats | 'Write' >> beam.io.WriteToText(
        stats_output_path,
        append_trailing_newlines=False,
        shard_name_template='',  # To force unsharded output.
        coder=beam.coders.ProtoCoder(
            statistics_pb2.DatasetFeatureStatisticsList)))
  @staticmethod
  @beam.ptransform_fn
  @beam.typehints.with_input_types(
      beam.typehints.KV[bytes, beam.typehints.Union[bytes, example_pb2.Example]]
  )
  @beam.typehints.with_output_types(
      beam.typehints.Dict[str, beam.typehints.Any])
  def _DecodeInputs(pcol: beam.pvalue.PCollection,
                    decode_fn: Any) -> beam.pvalue.PCollection:
    """Decodes the given PCollection while handling KV data.
    Args:
      pcol: PCollection of data.
      decode_fn: Function used to decode data.
    Returns:
      PCollection of decoded data.
    """
    def decode_example(
        kv_pair: Mapping[bytes, Union[bytes, example_pb2.Example]]
    ) -> Mapping[Text, Any]:  # pylint: disable=invalid-name
      """Decodes a single example."""
      (key, elem) = kv_pair
      result = decode_fn(elem)
      if _TRANSFORM_INTERNAL_FEATURE_FOR_KEY in result:
        raise ValueError('"{}" is a reserved feature name, '
                         'it should not be present in the dataset.'.format(
                             _TRANSFORM_INTERNAL_FEATURE_FOR_KEY))
      # Tunnel the record key through the decoded dict so it survives the
      # transform and can be re-attached when encoding (_EncodeAsExamples).
      result[_TRANSFORM_INTERNAL_FEATURE_FOR_KEY] = key
      return result
    return pcol | 'ApplyDecodeFn' >> beam.Map(decode_example)
  @beam.typehints.with_input_types(
      beam.typehints.Dict[str, beam.typehints.Any], metadata=beam.typehints.Any)
  @beam.typehints.with_output_types(
      beam.typehints.KV[beam.typehints.Union[None, bytes], example_pb2.Example])
  class _EncodeAsExamples(beam.DoFn):
    """Encodes data as tf.Examples based on the given metadata."""
    def __init__(self):
      # Coder is created lazily on first process() call.
      # NOTE(review): presumably because DoFn instances are serialized to
      # workers and the coder may not be picklable — confirm.
      self._coder = None
    def process(self, element: Dict[Text, Any],
                metadata: Any) -> Generator[Tuple[Any, Any], None, None]:
      if self._coder is None:
        self._coder = tft.coders.ExampleProtoCoder(
            metadata.schema, serialized=False)
      # Make sure that the synthetic key feature doesn't get encoded.
      assert _TRANSFORM_INTERNAL_FEATURE_FOR_KEY in element
      key = element[_TRANSFORM_INTERNAL_FEATURE_FOR_KEY]
      element_copy = element.copy()
      del element_copy[_TRANSFORM_INTERNAL_FEATURE_FOR_KEY]
      yield (key, self._coder.encode(element_copy))
  @staticmethod
  @beam.ptransform_fn
  @beam.typehints.with_input_types(beam.Pipeline)
  def _OptimizeRun(
      pipeline: beam.Pipeline, input_cache_dir: Text, output_cache_dir: Text,
      analyze_data_list: List[_Dataset], feature_spec: Mapping[Text, Any],
      preprocessing_fn: Any, cache_source: beam.PTransform
  ) -> Tuple[Dict[Text, Optional[_Dataset]], Dict[Text, Dict[
      Text, beam.pvalue.PCollection]], bool]:
    """Utilizes TFT cache if applicable and removes unused datasets."""
    # Cache keys are derived from each dataset's trailing path components.
    analysis_key_to_dataset = {
        analyzer_cache.make_dataset_key(dataset.file_pattern_suffix): dataset
        for dataset in analyze_data_list
    }
    if input_cache_dir is not None:
      input_cache = pipeline | analyzer_cache.ReadAnalysisCacheFromFS(
          input_cache_dir,
          list(analysis_key_to_dataset.keys()),
          source=cache_source)
    elif output_cache_dir is not None:
      # Cache will be written but there is nothing to read yet.
      input_cache = {}
    else:
      # Using None here to indicate that this pipeline will not read or write
      # cache.
      input_cache = None
    if input_cache is None:
      # Cache is disabled so we won't be filtering out any datasets, and will
      # always perform a flatten over all of them.
      filtered_analysis_dataset_keys = list(analysis_key_to_dataset.keys())
      flat_data_required = True
    else:
      filtered_analysis_dataset_keys, flat_data_required = (
          tft_beam.analysis_graph_builder.get_analysis_dataset_keys(
              preprocessing_fn, feature_spec,
              list(analysis_key_to_dataset.keys()), input_cache))
    new_analyze_data_dict = {}
    for key, dataset in six.iteritems(analysis_key_to_dataset):
      if key in filtered_analysis_dataset_keys:
        new_analyze_data_dict[key] = dataset
      else:
        # Keep the key with a None value so callers can tell the dataset was
        # filtered out by the cache analysis.
        new_analyze_data_dict[key] = None
    return (new_analyze_data_dict, input_cache, flat_data_required)
  def _GetPreprocessingFn(self, inputs: Mapping[Text, Any],
                          unused_outputs: Mapping[Text, Any]) -> Any:
    """Returns a user defined preprocessing_fn.
    Args:
      inputs: A dictionary of labelled input values.
      unused_outputs: A dictionary of labelled output values.
    Returns:
      User defined function.
    Raises:
      ValueError: When neither or both of MODULE_FILE and PREPROCESSING_FN
        are present in inputs.
    """
    has_module_file = bool(
        common.GetSoleValue(inputs, labels.MODULE_FILE, strict=False))
    has_preprocessing_fn = bool(
        common.GetSoleValue(inputs, labels.PREPROCESSING_FN, strict=False))
    # Exactly one of the two sources must be supplied.
    if has_module_file == has_preprocessing_fn:
      raise ValueError(
          'Neither or both of MODULE_FILE and PREPROCESSING_FN have been '
          'supplied in inputs.')
    if has_module_file:
      return import_utils.import_func_from_source(
          common.GetSoleValue(inputs, labels.MODULE_FILE), 'preprocessing_fn')
    # PREPROCESSING_FN is a dotted path: everything before the last dot is
    # the module, the final component is the function name.
    preprocessing_fn_path_split = common.GetSoleValue(
        inputs, labels.PREPROCESSING_FN).split('.')
    return import_utils.import_func_from_module(
        '.'.join(preprocessing_fn_path_split[0:-1]),
        preprocessing_fn_path_split[-1])
# TODO(b/122478841): Refine this API in following cls.
# Note: This API is up to change.
def Transform(self, inputs: Mapping[Text, Any], outputs: Mapping[Text, Any],
              status_file: Text) -> None:
  """Executes on request.

  This is the implementation part of transform executor. This is intended for
  using or extending the executor without artifact dependency.

  Args:
    inputs: A dictionary of labelled input values, including:
      - labels.COMPUTE_STATISTICS_LABEL: Whether compute statistics.
      - labels.SCHEMA_PATH_LABEL: Path to schema file.
      - labels.EXAMPLES_FILE_FORMAT_LABEL: Example file format, optional.
      - labels.EXAMPLES_DATA_FORMAT_LABEL: Example data format.
      - labels.ANALYZE_AND_TRANSFORM_DATA_PATHS_LABEL: Paths or path patterns
        to analyze and transform data.
      - labels.TRANSFORM_DATA_PATHS_LABEL: Paths or path patterns to transform
        only data.
      - labels.TFT_STATISTICS_USE_TFDV_LABEL: Whether use tfdv to compute
        statistics.
      - labels.MODULE_FILE: Path to a Python module that contains the
        preprocessing_fn, optional.
      - labels.PREPROCESSING_FN: Path to a Python function that implements
        preprocessing_fn, optional.
    outputs: A dictionary of labelled output values, including:
      - labels.PER_SET_STATS_OUTPUT_PATHS_LABEL: Paths to statistics output,
        optional.
      - labels.TRANSFORM_METADATA_OUTPUT_PATH_LABEL: A path to
        TFTransformOutput output.
      - labels.TRANSFORM_MATERIALIZE_OUTPUT_PATHS_LABEL: Paths to transform
        materialization.
      - labels.TEMP_OUTPUT_LABEL: A path to temporary directory.
    status_file: Where the status should be written (not yet implemented)
  """
  del status_file  # unused
  # Pull the execution parameters out of the labelled inputs/outputs.
  compute_statistics = common.GetSoleValue(inputs,
                                           labels.COMPUTE_STATISTICS_LABEL)
  transform_output_path = common.GetSoleValue(
      outputs, labels.TRANSFORM_METADATA_OUTPUT_PATH_LABEL)
  raw_examples_data_format = common.GetSoleValue(
      inputs, labels.EXAMPLES_DATA_FORMAT_LABEL)
  schema = common.GetSoleValue(inputs, labels.SCHEMA_PATH_LABEL)
  input_dataset_metadata = self._ReadMetadata(raw_examples_data_format,
                                              schema)
  tf.logging.info('Inputs to executor.Transform function: {}'.format(inputs))
  tf.logging.info(
      'Outputs to executor.Transform function: {}'.format(outputs))
  feature_spec = schema_utils.schema_as_feature_spec(
      _GetSchemaProto(input_dataset_metadata)).feature_spec
  # NOTE: We disallow an empty schema, which we detect by testing the
  # number of columns. While in principal an empty schema is valid, in
  # practice this is a sign of a user error, and this is a convenient
  # place to catch that error.
  if (not feature_spec and
      not self._ShouldDecodeAsRawExample(raw_examples_data_format)):
    raise ValueError(messages.SCHEMA_EMPTY)
  preprocessing_fn = self._GetPreprocessingFn(inputs, outputs)
  materialize_output_paths = common.GetValues(
      outputs, labels.TRANSFORM_MATERIALIZE_OUTPUT_PATHS_LABEL)
  # Inspecting the preprocessing_fn even if we know we need a full pass in
  # order to fail faster if it fails.
  try:
    analyze_input_columns = tft.get_analyze_input_columns(
        preprocessing_fn, feature_spec)
  except AttributeError:
    # If using TFT 1.12, fall back to assuming all features are used.
    analyze_input_columns = feature_spec.keys()
  # Fast path: when no statistics or materialization is requested and the
  # preprocessing_fn needs no analyzers, build the transform graph in-place
  # without reading any data.
  if not compute_statistics and not materialize_output_paths:
    if analyze_input_columns:
      tf.logging.warning(
          'Not using the in-place Transform because the following features '
          'require analyzing: {}'.format(
              tuple(c for c in analyze_input_columns)))
    else:
      tf.logging.warning(
          'Using the in-place Transform since compute_statistics=False, '
          'it does not materialize transformed data, and the configured '
          'preprocessing_fn appears to not require analyzing the data.')
      self._RunInPlaceImpl(preprocessing_fn, input_dataset_metadata,
                           transform_output_path)
      # TODO(b/122478841): Writes status to status file.
      return
  # Slow path: run a full Beam pipeline over the data.
  self._RunBeamImpl(inputs, outputs, preprocessing_fn, input_dataset_metadata,
                    raw_examples_data_format, transform_output_path,
                    compute_statistics, materialize_output_paths)
  # TODO(b/122478841): Writes status to status file.
def _RunBeamImpl(self, inputs: Mapping[Text, Any],
                 outputs: Mapping[Text, Any], preprocessing_fn: Any,
                 input_dataset_metadata: dataset_metadata.DatasetMetadata,
                 raw_examples_data_format: Text, transform_output_path: Text,
                 compute_statistics: bool,
                 materialize_output_paths: Sequence[Text]) -> _Status:
  """Perform data preprocessing with FlumeC++ runner.

  Args:
    inputs: A dictionary of labelled input values.
    outputs: A dictionary of labelled output values.
    preprocessing_fn: The tf.Transform preprocessing_fn.
    input_dataset_metadata: A DatasetMetadata object for the input data.
    raw_examples_data_format: A string describing the raw data format.
    transform_output_path: An absolute path to write the output to.
    compute_statistics: A bool indicating whether or not compute statistics.
    materialize_output_paths: Paths to materialized outputs.

  Raises:
    RuntimeError: If reset() is not being invoked between two run().
    ValueError: If the schema is empty.

  Returns:
    Status of the execution.
  """
  # Pull all remaining execution parameters out of the labelled
  # inputs/outputs; cache dirs and the file format are optional.
  raw_examples_file_format = common.GetSoleValue(
      inputs, labels.EXAMPLES_FILE_FORMAT_LABEL, strict=False)
  analyze_and_transform_data_paths = common.GetValues(
      inputs, labels.ANALYZE_AND_TRANSFORM_DATA_PATHS_LABEL)
  transform_only_data_paths = common.GetValues(
      inputs, labels.TRANSFORM_ONLY_DATA_PATHS_LABEL)
  stats_use_tfdv = common.GetSoleValue(inputs,
                                       labels.TFT_STATISTICS_USE_TFDV_LABEL)
  per_set_stats_output_paths = common.GetValues(
      outputs, labels.PER_SET_STATS_OUTPUT_PATHS_LABEL)
  temp_path = common.GetSoleValue(outputs, labels.TEMP_OUTPUT_LABEL)
  input_cache_dir = common.GetSoleValue(
      inputs, labels.CACHE_INPUT_PATH_LABEL, strict=False)
  output_cache_dir = common.GetSoleValue(
      outputs, labels.CACHE_OUTPUT_PATH_LABEL, strict=False)
  tf.logging.info('Analyze and transform data patterns: %s',
                  list(enumerate(analyze_and_transform_data_paths)))
  tf.logging.info('Transform data patterns: %s',
                  list(enumerate(transform_only_data_paths)))
  tf.logging.info('Transform materialization output paths: %s',
                  list(enumerate(materialize_output_paths)))
  tf.logging.info('Transform output path: %s', transform_output_path)
  feature_spec = schema_utils.schema_as_feature_spec(
      _GetSchemaProto(input_dataset_metadata)).feature_spec
  try:
    analyze_input_columns = tft.get_analyze_input_columns(
        preprocessing_fn, feature_spec)
    transform_input_columns = (
        tft.get_transform_input_columns(preprocessing_fn, feature_spec))
  except AttributeError:
    # If using TFT 1.12, fall back to assuming all features are used.
    analyze_input_columns = feature_spec.keys()
    transform_input_columns = feature_spec.keys()
  # Use the same dataset (same columns) for AnalyzeDataset and computing
  # pre-transform stats so that the data will only be read once for these
  # two operations.
  if compute_statistics:
    analyze_input_columns = list(
        set(list(analyze_input_columns) + list(transform_input_columns)))
  # Raw-example inputs keep the full metadata; otherwise narrow each
  # metadata to just the columns that stage actually reads.
  if input_dataset_metadata.schema is _RAW_EXAMPLE_SCHEMA:
    analyze_input_dataset_metadata = input_dataset_metadata
    transform_input_dataset_metadata = input_dataset_metadata
  else:
    analyze_input_dataset_metadata = dataset_metadata.DatasetMetadata(
        dataset_schema.from_feature_spec(
            {feature: feature_spec[feature]
             for feature in analyze_input_columns}))
    transform_input_dataset_metadata = dataset_metadata.DatasetMetadata(
        dataset_schema.from_feature_spec(
            {feature: feature_spec[feature]
             for feature in transform_input_columns}))
  # Per-set outputs and caching need per-dataset identity, so datasets
  # cannot be processed jointly in those cases.
  can_process_jointly = not bool(per_set_stats_output_paths or
                                 materialize_output_paths or output_cache_dir)
  analyze_data_list = self._MakeDatasetList(
      analyze_and_transform_data_paths, raw_examples_file_format,
      raw_examples_data_format, analyze_input_dataset_metadata,
      can_process_jointly)
  transform_data_list = self._MakeDatasetList(
      list(analyze_and_transform_data_paths) +
      list(transform_only_data_paths), raw_examples_file_format,
      raw_examples_data_format, transform_input_dataset_metadata,
      can_process_jointly)
  desired_batch_size = self._GetDesiredBatchSize(raw_examples_data_format)
  with self._CreatePipeline(outputs) as p:
    with tft_beam.Context(
        temp_dir=temp_path,
        desired_batch_size=desired_batch_size,
        passthrough_keys={_TRANSFORM_INTERNAL_FEATURE_FOR_KEY},
        use_deep_copy_optimization=True):
      # pylint: disable=expression-not-assigned
      # pylint: disable=no-value-for-parameter
      _ = (
          p | self._IncrementColumnUsageCounter(
              len(feature_spec.keys()), len(analyze_input_columns),
              len(transform_input_columns)))
      (new_analyze_data_dict, input_cache, flat_data_required) = (
          p | self._OptimizeRun(input_cache_dir, output_cache_dir,
                                analyze_data_list, feature_spec,
                                preprocessing_fn, self._GetCacheSource()))
      # Removing unneeded datasets if they won't be needed for statistics or
      # materialization.
      if not materialize_output_paths and not compute_statistics:
        analyze_data_list = [
            d for d in new_analyze_data_dict.values() if d is not None
        ]
      # NOTE(review): this log lists datasets from the already-filtered
      # analyze_data_list that are absent from new_analyze_data_dict; it looks
      # like it may log an empty list — verify intent against _OptimizeRun.
      if len(analyze_data_list) < len(new_analyze_data_dict):
        tf.logging.info(
            'Not reading the following datasets due to cache: %s', [
                dataset.file_pattern_suffix
                for dataset in analyze_data_list
                if dataset not in new_analyze_data_dict.values()
            ])
      analyze_decode_fn = (
          self._GetDecodeFunction(raw_examples_data_format,
                                  analyze_input_dataset_metadata.schema))
      # Read and decode every (remaining) analysis dataset.
      for (idx, dataset) in enumerate(analyze_data_list):
        dataset.encoded = (
            p | 'ReadAnalysisDataset[{}]'.format(idx) >>
            self._ReadExamples(dataset))
        dataset.decoded = (
            dataset.encoded
            | 'DecodeAnalysisDataset[{}]'.format(idx) >>
            self._DecodeInputs(analyze_decode_fn))
      # Keyed analysis inputs; cached entries map to None.
      input_analysis_data = {}
      for key, dataset in six.iteritems(new_analyze_data_dict):
        if dataset is None:
          input_analysis_data[key] = None
        else:
          input_analysis_data[key] = dataset.decoded
      if flat_data_required:
        flat_input_analysis_data = (
            [dataset.decoded for dataset in analyze_data_list]
            | 'FlattenAnalysisDatasets' >> beam.Flatten(pipeline=p))
      else:
        flat_input_analysis_data = None
      if input_cache:
        tf.logging.info('Analyzing data with cache.')
      transform_fn, cache_output = (
          (flat_input_analysis_data, input_analysis_data, input_cache,
           input_dataset_metadata)
          | 'AnalyzeDataset' >> tft_beam.AnalyzeDatasetWithCache(
              preprocessing_fn, pipeline=p))
      # Write the raw/input metadata.
      (input_dataset_metadata
       | 'WriteMetadata' >> tft_beam.WriteMetadata(
           os.path.join(transform_output_path,
                        tft.TFTransformOutput.RAW_METADATA_DIR), p))
      # WriteTransformFn writes transform_fn and metadata to subdirectories
      # tensorflow_transform.SAVED_MODEL_DIR and
      # tensorflow_transform.TRANSFORMED_METADATA_DIR respectively.
      (transform_fn |
       'WriteTransformFn' >> tft_beam.WriteTransformFn(transform_output_path))
      if output_cache_dir is not None and cache_output is not None:
        # TODO(b/37788560): Possibly make this part of the beam graph.
        tf.io.gfile.makedirs(output_cache_dir)
        tf.logging.info('Using existing cache in: %s', input_cache_dir)
        if input_cache_dir is not None:
          # Only copy cache that is relevant to this iteration. This is
          # assuming that this pipeline operates on rolling ranges, so those
          # cache entries may also be relevant for future iterations.
          for span_cache_dir in input_analysis_data:
            full_span_cache_dir = os.path.join(input_cache_dir,
                                               span_cache_dir)
            if tf.io.gfile.isdir(full_span_cache_dir):
              self._CopyCache(full_span_cache_dir,
                              os.path.join(output_cache_dir, span_cache_dir))
        (cache_output
         | 'WriteCache' >> analyzer_cache.WriteAnalysisCacheToFS(
             p, output_cache_dir, sink=self._GetCacheSink()))
      if compute_statistics or materialize_output_paths:
        # Do not compute pre-transform stats if the input format is raw proto,
        # as StatsGen would treat any input as tf.Example.
        if (compute_statistics and
            not self._IsDataFormatProto(raw_examples_data_format)):
          # Aggregated feature stats before transformation.
          pre_transform_feature_stats_path = os.path.join(
              transform_output_path,
              tft.TFTransformOutput.PRE_TRANSFORM_FEATURE_STATS_PATH)
          schema_proto = _GetSchemaProto(analyze_input_dataset_metadata)
          ([
              dataset.decoded if stats_use_tfdv else dataset.encoded
              for dataset in analyze_data_list
          ]
           | 'FlattenPreTransformAnalysisDatasets' >> beam.Flatten(pipeline=p)
           | 'GenerateAggregatePreTransformAnalysisStats' >>
           self._GenerateStats(
               pre_transform_feature_stats_path,
               schema_proto,
               use_deep_copy_optimization=True,
               use_tfdv=stats_use_tfdv))
        transform_decode_fn = (
            self._GetDecodeFunction(raw_examples_data_format,
                                    transform_input_dataset_metadata.schema))
        # transform_data_list is a superset of analyze_data_list, we pay the
        # cost to read the same dataset (analyze_data_list) again here to
        # prevent certain beam runner from doing large temp materialization.
        for (idx, dataset) in enumerate(transform_data_list):
          dataset.encoded = (
              p
              | 'ReadTransformDataset[{}]'.format(idx) >>
              self._ReadExamples(dataset))
          dataset.decoded = (
              dataset.encoded
              | 'DecodeTransformDataset[{}]'.format(idx) >>
              self._DecodeInputs(transform_decode_fn))
          (dataset.transformed,
           metadata) = (((dataset.decoded, transform_input_dataset_metadata),
                         transform_fn)
                        | 'TransformDataset[{}]'.format(idx) >>
                        tft_beam.TransformDataset())
          # Encoding is only needed for materialization or non-TFDV stats.
          if materialize_output_paths or not stats_use_tfdv:
            dataset.transformed_and_encoded = (
                dataset.transformed
                | 'EncodeTransformedDataset[{}]'.format(idx) >> beam.ParDo(
                    self._EncodeAsExamples(), metadata))
        if compute_statistics:
          # Aggregated feature stats after transformation.
          _, metadata = transform_fn
          post_transform_feature_stats_path = os.path.join(
              transform_output_path,
              tft.TFTransformOutput.POST_TRANSFORM_FEATURE_STATS_PATH)
          # TODO(b/70392441): Retain tf.Metadata (e.g., IntDomain) in
          # schema. Currently input dataset schema only contains dtypes,
          # and other metadata is dropped due to roundtrip to tensors.
          transformed_schema_proto = _GetSchemaProto(metadata)
          ([(dataset.transformed
             if stats_use_tfdv else dataset.transformed_and_encoded)
            for dataset in transform_data_list]
           | 'FlattenPostTransformAnalysisDatasets' >> beam.Flatten()
           | 'GenerateAggregatePostTransformAnalysisStats' >>
           self._GenerateStats(
               post_transform_feature_stats_path,
               transformed_schema_proto,
               use_tfdv=stats_use_tfdv))
          if per_set_stats_output_paths:
            assert len(transform_data_list) == len(per_set_stats_output_paths)
            # TODO(b/67632871): Remove duplicate stats gen compute that is
            # done both on a flattened view of the data, and on each span
            # below.
            bundles = zip(transform_data_list, per_set_stats_output_paths)
            for (idx, (dataset, output_path)) in enumerate(bundles):
              if stats_use_tfdv:
                data = dataset.transformed
              else:
                data = dataset.transformed_and_encoded
              (data
               | 'GeneratePostTransformStats[{}]'.format(idx) >>
               self._GenerateStats(
                   output_path,
                   transformed_schema_proto,
                   use_tfdv=stats_use_tfdv))
        if materialize_output_paths:
          assert len(transform_data_list) == len(materialize_output_paths)
          bundles = zip(transform_data_list, materialize_output_paths)
          for (idx, (dataset, output_path)) in enumerate(bundles):
            (dataset.transformed_and_encoded
             | 'Materialize[{}]'.format(idx) >> self._WriteExamples(
                 raw_examples_file_format, output_path))
  return _Status.OK()
def _RunInPlaceImpl(self, preprocessing_fn: Any,
                    metadata: dataset_metadata.DatasetMetadata,
                    transform_output_path: Text) -> _Status:
  """Runs a transformation iteration in-place without looking at the data.

  Builds the transform graph directly in a TF session (no Beam pipeline, no
  data read) and writes the raw metadata, SavedModel transform_fn and
  transformed metadata to `transform_output_path`.

  Args:
    preprocessing_fn: The tf.Transform preprocessing_fn.
    metadata: A DatasetMetadata object for the input data.
    transform_output_path: An absolute path to write the output to.

  Returns:
    Status of the execution.
  """
  tf.logging.info('Processing an in-place transform')
  raw_metadata_dir = os.path.join(transform_output_path,
                                  tft.TFTransformOutput.RAW_METADATA_DIR)
  metadata_io.write_metadata(metadata, raw_metadata_dir)
  with tf.Graph().as_default() as graph:
    with tf.Session(graph=graph) as sess:
      # Build placeholder inputs matching the raw feature spec.
      input_signature = impl_helper.feature_spec_as_batched_placeholders(
          schema_utils.schema_as_feature_spec(
              _GetSchemaProto(metadata)).feature_spec)
      # In order to avoid a bug where import_graph_def fails when the
      # input_map and return_elements of an imported graph are the same
      # (b/34288791), we avoid using the placeholder of an input column as an
      # output of a graph. We do this by applying tf.identity to all inputs of
      # the preprocessing_fn. Note this applies at the level of raw tensors.
      # TODO(b/34288791): Remove this workaround and use a shallow copy of
      # inputs instead. A shallow copy is needed in case
      # self._preprocessing_fn mutates its input.
      copied_inputs = impl_helper.copy_tensors(input_signature)
      output_signature = preprocessing_fn(copied_inputs)
      # Initialize variables/tables before exporting the SavedModel.
      sess.run(tf.global_variables_initializer())
      sess.run(tf.tables_initializer())
      transform_fn_path = os.path.join(transform_output_path,
                                       tft.TFTransformOutput.TRANSFORM_FN_DIR)
      saved_transform_io.write_saved_transform_from_session(
          sess, input_signature, output_signature, transform_fn_path)
      # Infer the post-transform schema from the graph outputs.
      transformed_metadata = dataset_metadata.DatasetMetadata(
          schema=tft.schema_inference.infer_feature_schema(
              output_signature, graph, sess))
  transformed_metadata_dir = os.path.join(
      transform_output_path, tft.TFTransformOutput.TRANSFORMED_METADATA_DIR)
  metadata_io.write_metadata(transformed_metadata, transformed_metadata_dir)
  return _Status.OK()
def _CreatePipeline(self,
                    unused_outputs: Mapping[Text, Any]) -> beam.Pipeline:
  """Constructs the Beam pipeline used by this executor.

  Args:
    unused_outputs: A dictionary of labelled output values.

  Returns:
    Beam pipeline.
  """
  # TODO(b/122478841): Consider making beam pipeline part of context to
  # support fusion.
  pipeline_args = self._get_beam_pipeline_args()
  return beam.Pipeline(argv=pipeline_args)
# TODO(b/114444977): Remove the unused_can_process_jointly argument and
# perhaps the need for this entire function.
def _MakeDatasetList(self, file_patterns: Sequence[Text], file_format: Text,
                     data_format: Text,
                     metadata: dataset_metadata.DatasetMetadata,
                     unused_can_process_jointly: bool) -> List[_Dataset]:
  """Builds one `_Dataset` per entry in `file_patterns`.

  Args:
    file_patterns: A list of file patterns where each pattern corresponds to
      one `_Dataset`.
    file_format: The file format of the datasets.
    data_format: The data format of the datasets.
    metadata: A DatasetMetadata object for the datasets.
    unused_can_process_jointly: Whether paths can be processed jointly,
      unused.

  Returns:
    A list of `_Dataset`.
  """
  # Each file pattern is wrapped independently so downstream stages can
  # read and process the datasets separately.
  return [
      _Dataset(pattern, file_format, data_format, metadata)
      for pattern in file_patterns
  ]
@staticmethod
def _ShouldDecodeAsRawExample(data_format: Text) -> bool:
  """Returns true if data format should be decoded as raw example.

  Raw decoding applies to both sequence-example and proto data formats.

  Args:
    data_format: name of data format.

  Returns:
    True if data format should be decoded as raw example.
  """
  if Executor._IsDataFormatSequenceExample(data_format):
    return True
  return Executor._IsDataFormatProto(data_format)
@staticmethod
def _IsDataFormatSequenceExample(data_format: Text) -> bool:
  """Tells whether `data_format` is the tf.SequenceExample format.

  Args:
    data_format: name of data format.

  Returns:
    True if data format is sequence example.
  """
  is_sequence_example = data_format == labels.FORMAT_TF_SEQUENCE_EXAMPLE
  return is_sequence_example
@staticmethod
def _IsDataFormatProto(data_format: Text) -> bool:
  """Tells whether `data_format` is the raw protocol-buffer format.

  Args:
    data_format: name of data format.

  Returns:
    True if data format is protocol buffer.
  """
  is_proto = data_format == labels.FORMAT_PROTO
  return is_proto
def _GetDesiredBatchSize(self, data_format: Text) -> Any:
  """Returns the batch size to use for `data_format`.

  Sequence examples are processed one at a time; every other format lets
  the framework pick a batch size.

  Args:
    data_format: name of data format.

  Returns:
    Batch size or None.
  """
  return 1 if self._IsDataFormatSequenceExample(data_format) else None
@staticmethod
def _DecodeAsRawExample(serialized_examples):
  """Wraps serialized examples, undecoded, under the raw-example key."""
  raw_record = {RAW_EXAMPLE_KEY: serialized_examples}
  return raw_record
def _GetDecodeFunction(self, data_format: Text,
                       schema: dataset_schema.Schema) -> Any:
  """Returns the decode function for `data_format`.

  Args:
    data_format: name of data format.
    schema: a dataset_schema.Schema for the data.

  Returns:
    Function for decoding examples.
  """
  if not self._ShouldDecodeAsRawExample(data_format):
    # TODO(b/122478841): Eventually make it always serialize.
    return tft.coders.ExampleProtoCoder(schema, serialized=False).decode
  if self._IsDataFormatSequenceExample(data_format):
    tf.logging.warning(
        'TFX Transform doesn\'t officially support tf.SequenceExample, '
        'follow b/38235367 to track official support progress. We do not '
        'guarantee not to break your pipeline if you use Transform with a '
        'tf.SequenceExample data type. Use at your own risk.')
  return self._DecodeAsRawExample
@staticmethod
def _GetCacheSource():
  """Returns the analyzer-cache source; None means use the default."""
  return None
@staticmethod
def _GetCacheSink():
  """Returns the analyzer-cache sink; None means use the default."""
  return None
@staticmethod
def _CopyCache(src, dst):
  """Recursively copies a cache directory from `src` to `dst`."""
  # TODO(b/37788560): Make this more efficient.
  io_utils.copy_dir(src, dst)
| [
"apache_beam.metrics.Metrics.counter",
"apache_beam.Map",
"tensorflow.logging.info",
"tensorflow.logging.debug",
"tensorflow_data_validation.utils.batch_util.BatchExamplesToArrowTables",
"tfx.types.artifact_utils.get_split_uri",
"tfx.components.transform.common.GetSoleValue",
"tensorflow.logging.warni... | [((10272, 10318), 'apache_beam.typehints.with_input_types', 'beam.typehints.with_input_types', (['beam.Pipeline'], {}), '(beam.Pipeline)\n', (10303, 10318), True, 'import apache_beam as beam\n'), ((10322, 10373), 'apache_beam.typehints.with_output_types', 'beam.typehints.with_output_types', (['beam.pvalue.PDone'], {}), '(beam.pvalue.PDone)\n', (10354, 10373), True, 'import apache_beam as beam\n'), ((11421, 11467), 'apache_beam.typehints.with_input_types', 'beam.typehints.with_input_types', (['beam.Pipeline'], {}), '(beam.Pipeline)\n', (11452, 11467), True, 'import apache_beam as beam\n'), ((11524, 11637), 'apache_beam.typehints.with_output_types', 'beam.typehints.with_output_types', (['beam.typehints.KV[bytes, beam.typehints.Union[bytes, example_pb2.Example]]'], {}), '(beam.typehints.KV[bytes, beam.typehints.\n Union[bytes, example_pb2.Example]])\n', (11556, 11637), True, 'import apache_beam as beam\n'), ((12590, 12668), 'apache_beam.typehints.with_input_types', 'beam.typehints.with_input_types', (['beam.typehints.KV[bytes, example_pb2.Example]'], {}), '(beam.typehints.KV[bytes, example_pb2.Example])\n', (12621, 12668), True, 'import apache_beam as beam\n'), ((12679, 12730), 'apache_beam.typehints.with_output_types', 'beam.typehints.with_output_types', (['beam.pvalue.PDone'], {}), '(beam.pvalue.PDone)\n', (12711, 12730), True, 'import apache_beam as beam\n'), ((14617, 14694), 'apache_beam.typehints.with_input_types', 'beam.typehints.with_input_types', (['beam.typehints.Dict[str, beam.typehints.Any]'], {}), '(beam.typehints.Dict[str, beam.typehints.Any])\n', (14648, 14694), True, 'import apache_beam as beam\n'), ((14721, 14772), 'apache_beam.typehints.with_output_types', 'beam.typehints.with_output_types', (['beam.pvalue.PDone'], {}), '(beam.pvalue.PDone)\n', (14753, 14772), True, 'import apache_beam as beam\n'), ((15738, 15815), 'apache_beam.typehints.with_input_types', 'beam.typehints.with_input_types', (['beam.typehints.Dict[str, 
beam.typehints.Any]'], {}), '(beam.typehints.Dict[str, beam.typehints.Any])\n', (15769, 15815), True, 'import apache_beam as beam\n'), ((15819, 15896), 'apache_beam.typehints.with_output_types', 'beam.typehints.with_output_types', (['statistics_pb2.DatasetFeatureStatisticsList'], {}), '(statistics_pb2.DatasetFeatureStatisticsList)\n', (15851, 15896), True, 'import apache_beam as beam\n'), ((18106, 18182), 'apache_beam.typehints.with_input_types', 'beam.typehints.with_input_types', (['statistics_pb2.DatasetFeatureStatisticsList'], {}), '(statistics_pb2.DatasetFeatureStatisticsList)\n', (18137, 18182), True, 'import apache_beam as beam\n'), ((18186, 18237), 'apache_beam.typehints.with_output_types', 'beam.typehints.with_output_types', (['beam.pvalue.PDone'], {}), '(beam.pvalue.PDone)\n', (18218, 18237), True, 'import apache_beam as beam\n'), ((19079, 19191), 'apache_beam.typehints.with_input_types', 'beam.typehints.with_input_types', (['beam.typehints.KV[bytes, beam.typehints.Union[bytes, example_pb2.Example]]'], {}), '(beam.typehints.KV[bytes, beam.typehints.\n Union[bytes, example_pb2.Example]])\n', (19110, 19191), True, 'import apache_beam as beam\n'), ((19200, 19278), 'apache_beam.typehints.with_output_types', 'beam.typehints.with_output_types', (['beam.typehints.Dict[str, beam.typehints.Any]'], {}), '(beam.typehints.Dict[str, beam.typehints.Any])\n', (19232, 19278), True, 'import apache_beam as beam\n'), ((20263, 20374), 'apache_beam.typehints.with_input_types', 'beam.typehints.with_input_types', (['beam.typehints.Dict[str, beam.typehints.Any]'], {'metadata': 'beam.typehints.Any'}), '(beam.typehints.Dict[str, beam.typehints.Any\n ], metadata=beam.typehints.Any)\n', (20294, 20374), True, 'import apache_beam as beam\n'), ((20380, 20492), 'apache_beam.typehints.with_output_types', 'beam.typehints.with_output_types', (['beam.typehints.KV[beam.typehints.Union[None, bytes], example_pb2.Example]'], {}), '(beam.typehints.KV[beam.typehints.Union[\n None, bytes], 
example_pb2.Example])\n', (20412, 20492), True, 'import apache_beam as beam\n'), ((21280, 21326), 'apache_beam.typehints.with_input_types', 'beam.typehints.with_input_types', (['beam.Pipeline'], {}), '(beam.Pipeline)\n', (21311, 21326), True, 'import apache_beam as beam\n'), ((2300, 2333), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[]', 'tf.string'], {}), '([], tf.string)\n', (2318, 2333), True, 'import tensorflow as tf\n'), ((7558, 7621), 'tfx.types.artifact_utils.get_split_uri', 'artifact_utils.get_split_uri', (["input_dict['input_data']", '"""train"""'], {}), "(input_dict['input_data'], 'train')\n", (7586, 7621), False, 'from tfx.types import artifact_utils\n'), ((7692, 7754), 'tfx.types.artifact_utils.get_split_uri', 'artifact_utils.get_split_uri', (["input_dict['input_data']", '"""eval"""'], {}), "(input_dict['input_data'], 'eval')\n", (7720, 7754), False, 'from tfx.types import artifact_utils\n'), ((7936, 7998), 'tfx.types.artifact_utils.get_single_uri', 'artifact_utils.get_single_uri', (["output_dict['transform_output']"], {}), "(output_dict['transform_output'])\n", (7965, 7998), False, 'from tfx.types import artifact_utils\n'), ((8039, 8113), 'tfx.types.artifact_utils.get_split_uri', 'artifact_utils.get_split_uri', (["output_dict['transformed_examples']", '"""train"""'], {}), "(output_dict['transformed_examples'], 'train')\n", (8067, 8113), False, 'from tfx.types import artifact_utils\n'), ((8153, 8226), 'tfx.types.artifact_utils.get_split_uri', 'artifact_utils.get_split_uri', (["output_dict['transformed_examples']", '"""eval"""'], {}), "(output_dict['transformed_examples'], 'eval')\n", (8181, 8226), False, 'from tfx.types import artifact_utils\n'), ((8252, 8313), 'os.path.join', 'os.path.join', (['transform_output', '_TEMP_DIR_IN_TRANSFORM_OUTPUT'], {}), '(transform_output, _TEMP_DIR_IN_TRANSFORM_OUTPUT)\n', (8264, 8313), False, 'import os\n'), ((8318, 8380), 'tensorflow.logging.debug', 'tf.logging.debug', (['"""Using temp path %s for 
tft.beam"""', 'temp_path'], {}), "('Using temp path %s for tft.beam', temp_path)\n", (8334, 8380), True, 'import tensorflow as tf\n'), ((10120, 10194), 'tensorflow.logging.info', 'tf.logging.info', (['"""Cleaning up temp path %s on executor success"""', 'temp_path'], {}), "('Cleaning up temp path %s on executor success', temp_path)\n", (10135, 10194), True, 'import tensorflow as tf\n'), ((10199, 10229), 'tfx.utils.io_utils.delete_dir', 'io_utils.delete_dir', (['temp_path'], {}), '(temp_path)\n', (10218, 10229), False, 'from tfx.utils import io_utils\n'), ((13674, 13697), 'tfx.utils.io_utils.SchemaReader', 'io_utils.SchemaReader', ([], {}), '()\n', (13695, 13697), False, 'from tfx.utils import io_utils\n'), ((17742, 17769), 'tensorflow_data_validation.__version__.split', 'tfdv.__version__.split', (['"""."""'], {}), "('.')\n", (17764, 17769), True, 'import tensorflow_data_validation as tfdv\n'), ((22874, 22912), 'six.iteritems', 'six.iteritems', (['analysis_key_to_dataset'], {}), '(analysis_key_to_dataset)\n', (22887, 22912), False, 'import six\n'), ((26260, 26320), 'tfx.components.transform.common.GetSoleValue', 'common.GetSoleValue', (['inputs', 'labels.COMPUTE_STATISTICS_LABEL'], {}), '(inputs, labels.COMPUTE_STATISTICS_LABEL)\n', (26279, 26320), False, 'from tfx.components.transform import common\n'), ((26394, 26467), 'tfx.components.transform.common.GetSoleValue', 'common.GetSoleValue', (['outputs', 'labels.TRANSFORM_METADATA_OUTPUT_PATH_LABEL'], {}), '(outputs, labels.TRANSFORM_METADATA_OUTPUT_PATH_LABEL)\n', (26413, 26467), False, 'from tfx.components.transform import common\n'), ((26508, 26570), 'tfx.components.transform.common.GetSoleValue', 'common.GetSoleValue', (['inputs', 'labels.EXAMPLES_DATA_FORMAT_LABEL'], {}), '(inputs, labels.EXAMPLES_DATA_FORMAT_LABEL)\n', (26527, 26570), False, 'from tfx.components.transform import common\n'), ((26593, 26646), 'tfx.components.transform.common.GetSoleValue', 'common.GetSoleValue', (['inputs', 
'labels.SCHEMA_PATH_LABEL'], {}), '(inputs, labels.SCHEMA_PATH_LABEL)\n', (26612, 26646), False, 'from tfx.components.transform import common\n'), ((27564, 27638), 'tfx.components.transform.common.GetValues', 'common.GetValues', (['outputs', 'labels.TRANSFORM_MATERIALIZE_OUTPUT_PATHS_LABEL'], {}), '(outputs, labels.TRANSFORM_MATERIALIZE_OUTPUT_PATHS_LABEL)\n', (27580, 27638), False, 'from tfx.components.transform import common\n'), ((30276, 30352), 'tfx.components.transform.common.GetSoleValue', 'common.GetSoleValue', (['inputs', 'labels.EXAMPLES_FILE_FORMAT_LABEL'], {'strict': '(False)'}), '(inputs, labels.EXAMPLES_FILE_FORMAT_LABEL, strict=False)\n', (30295, 30352), False, 'from tfx.components.transform import common\n'), ((30401, 30472), 'tfx.components.transform.common.GetValues', 'common.GetValues', (['inputs', 'labels.ANALYZE_AND_TRANSFORM_DATA_PATHS_LABEL'], {}), '(inputs, labels.ANALYZE_AND_TRANSFORM_DATA_PATHS_LABEL)\n', (30417, 30472), False, 'from tfx.components.transform import common\n'), ((30514, 30578), 'tfx.components.transform.common.GetValues', 'common.GetValues', (['inputs', 'labels.TRANSFORM_ONLY_DATA_PATHS_LABEL'], {}), '(inputs, labels.TRANSFORM_ONLY_DATA_PATHS_LABEL)\n', (30530, 30578), False, 'from tfx.components.transform import common\n'), ((30609, 30674), 'tfx.components.transform.common.GetSoleValue', 'common.GetSoleValue', (['inputs', 'labels.TFT_STATISTICS_USE_TFDV_LABEL'], {}), '(inputs, labels.TFT_STATISTICS_USE_TFDV_LABEL)\n', (30628, 30674), False, 'from tfx.components.transform import common\n'), ((30749, 30815), 'tfx.components.transform.common.GetValues', 'common.GetValues', (['outputs', 'labels.PER_SET_STATS_OUTPUT_PATHS_LABEL'], {}), '(outputs, labels.PER_SET_STATS_OUTPUT_PATHS_LABEL)\n', (30765, 30815), False, 'from tfx.components.transform import common\n'), ((30841, 30895), 'tfx.components.transform.common.GetSoleValue', 'common.GetSoleValue', (['outputs', 'labels.TEMP_OUTPUT_LABEL'], {}), '(outputs, 
labels.TEMP_OUTPUT_LABEL)\n', (30860, 30895), False, 'from tfx.components.transform import common\n'), ((30919, 30991), 'tfx.components.transform.common.GetSoleValue', 'common.GetSoleValue', (['inputs', 'labels.CACHE_INPUT_PATH_LABEL'], {'strict': '(False)'}), '(inputs, labels.CACHE_INPUT_PATH_LABEL, strict=False)\n', (30938, 30991), False, 'from tfx.components.transform import common\n'), ((31024, 31098), 'tfx.components.transform.common.GetSoleValue', 'common.GetSoleValue', (['outputs', 'labels.CACHE_OUTPUT_PATH_LABEL'], {'strict': '(False)'}), '(outputs, labels.CACHE_OUTPUT_PATH_LABEL, strict=False)\n', (31043, 31098), False, 'from tfx.components.transform import common\n'), ((31491, 31558), 'tensorflow.logging.info', 'tf.logging.info', (['"""Transform output path: %s"""', 'transform_output_path'], {}), "('Transform output path: %s', transform_output_path)\n", (31506, 31558), True, 'import tensorflow as tf\n'), ((43629, 43680), 'tensorflow.logging.info', 'tf.logging.info', (['"""Processing an in-place transform"""'], {}), "('Processing an in-place transform')\n", (43644, 43680), True, 'import tensorflow as tf\n'), ((43705, 43780), 'os.path.join', 'os.path.join', (['transform_output_path', 'tft.TFTransformOutput.RAW_METADATA_DIR'], {}), '(transform_output_path, tft.TFTransformOutput.RAW_METADATA_DIR)\n', (43717, 43780), False, 'import os\n'), ((43821, 43875), 'tensorflow_transform.tf_metadata.metadata_io.write_metadata', 'metadata_io.write_metadata', (['metadata', 'raw_metadata_dir'], {}), '(metadata, raw_metadata_dir)\n', (43847, 43875), False, 'from tensorflow_transform.tf_metadata import metadata_io\n'), ((45432, 45520), 'os.path.join', 'os.path.join', (['transform_output_path', 'tft.TFTransformOutput.TRANSFORMED_METADATA_DIR'], {}), '(transform_output_path, tft.TFTransformOutput.\n TRANSFORMED_METADATA_DIR)\n', (45444, 45520), False, 'import os\n'), ((45529, 45603), 'tensorflow_transform.tf_metadata.metadata_io.write_metadata', 'metadata_io.write_metadata', 
(['transformed_metadata', 'transformed_metadata_dir'], {}), '(transformed_metadata, transformed_metadata_dir)\n', (45555, 45603), False, 'from tensorflow_transform.tf_metadata import metadata_io\n'), ((49618, 49645), 'tfx.utils.io_utils.copy_dir', 'io_utils.copy_dir', (['src', 'dst'], {}), '(src, dst)\n', (49635, 49645), False, 'from tfx.utils import io_utils\n'), ((7860, 7911), 'tfx.types.artifact_utils.get_single_uri', 'artifact_utils.get_single_uri', (["input_dict['schema']"], {}), "(input_dict['schema'])\n", (7889, 7911), False, 'from tfx.types import artifact_utils\n'), ((8846, 8888), 'tfx.utils.io_utils.all_files_pattern', 'io_utils.all_files_pattern', (['train_data_uri'], {}), '(train_data_uri)\n', (8872, 8888), False, 'from tfx.utils import io_utils\n'), ((8950, 8991), 'tfx.utils.io_utils.all_files_pattern', 'io_utils.all_files_pattern', (['eval_data_uri'], {}), '(eval_data_uri)\n', (8976, 8991), False, 'from tfx.utils import io_utils\n'), ((14215, 14268), 'tensorflow_transform.tf_metadata.dataset_metadata.DatasetMetadata', 'dataset_metadata.DatasetMetadata', (['_RAW_EXAMPLE_SCHEMA'], {}), '(_RAW_EXAMPLE_SCHEMA)\n', (14247, 14268), False, 'from tensorflow_transform.tf_metadata import dataset_metadata\n'), ((14479, 14504), 'tensorflow_transform.tf_metadata.dataset_schema.Schema', 'dataset_schema.Schema', (['{}'], {}), '({})\n', (14500, 14504), False, 'from tensorflow_transform.tf_metadata import dataset_schema\n'), ((16260, 16303), 'tensorflow_transform.tf_metadata.schema_utils.schema_as_feature_spec', 'schema_utils.schema_as_feature_spec', (['schema'], {}), '(schema)\n', (16295, 16303), False, 'from tensorflow_transform.tf_metadata import schema_utils\n'), ((16922, 16950), 'six.iteritems', 'six.iteritems', (['feature_specs'], {}), '(feature_specs)\n', (16935, 16950), False, 'import six\n'), ((18642, 18676), 'os.path.dirname', 'os.path.dirname', (['stats_output_path'], {}), '(stats_output_path)\n', (18657, 18676), False, 'import os\n'), ((21780, 21840), 
'tensorflow_transform.beam.analyzer_cache.make_dataset_key', 'analyzer_cache.make_dataset_key', (['dataset.file_pattern_suffix'], {}), '(dataset.file_pattern_suffix)\n', (21811, 21840), False, 'from tensorflow_transform.beam import analyzer_cache\n'), ((23641, 23702), 'tfx.components.transform.common.GetSoleValue', 'common.GetSoleValue', (['inputs', 'labels.MODULE_FILE'], {'strict': '(False)'}), '(inputs, labels.MODULE_FILE, strict=False)\n', (23660, 23702), False, 'from tfx.components.transform import common\n'), ((23745, 23811), 'tfx.components.transform.common.GetSoleValue', 'common.GetSoleValue', (['inputs', 'labels.PREPROCESSING_FN'], {'strict': '(False)'}), '(inputs, labels.PREPROCESSING_FN, strict=False)\n', (23764, 23811), False, 'from tfx.components.transform import common\n'), ((27805, 27866), 'tensorflow_transform.get_analyze_input_columns', 'tft.get_analyze_input_columns', (['preprocessing_fn', 'feature_spec'], {}), '(preprocessing_fn, feature_spec)\n', (27834, 27866), True, 'import tensorflow_transform as tft\n'), ((31717, 31778), 'tensorflow_transform.get_analyze_input_columns', 'tft.get_analyze_input_columns', (['preprocessing_fn', 'feature_spec'], {}), '(preprocessing_fn, feature_spec)\n', (31746, 31778), True, 'import tensorflow_transform as tft\n'), ((31834, 31897), 'tensorflow_transform.get_transform_input_columns', 'tft.get_transform_input_columns', (['preprocessing_fn', 'feature_spec'], {}), '(preprocessing_fn, feature_spec)\n', (31865, 31897), True, 'import tensorflow_transform as tft\n'), ((49343, 49397), 'tensorflow_transform.coders.ExampleProtoCoder', 'tft.coders.ExampleProtoCoder', (['schema'], {'serialized': '(False)'}), '(schema, serialized=False)\n', (49371, 49397), True, 'import tensorflow_transform as tft\n'), ((8507, 8556), 'tfx.types.artifact_utils.get_single_uri', 'artifact_utils.get_single_uri', (['params_dict[label]'], {}), '(params_dict[label])\n', (8536, 8556), False, 'from tfx.types import artifact_utils\n'), ((9562, 9638), 
'os.path.join', 'os.path.join', (['transformed_train_output', '_DEFAULT_TRANSFORMED_EXAMPLES_PREFIX'], {}), '(transformed_train_output, _DEFAULT_TRANSFORMED_EXAMPLES_PREFIX)\n', (9574, 9638), False, 'import os\n'), ((9677, 9752), 'os.path.join', 'os.path.join', (['transformed_eval_output', '_DEFAULT_TRANSFORMED_EXAMPLES_PREFIX'], {}), '(transformed_eval_output, _DEFAULT_TRANSFORMED_EXAMPLES_PREFIX)\n', (9689, 9752), False, 'import os\n'), ((11342, 11377), 'apache_beam.Map', 'beam.Map', (['_MakeAndIncrementCounters'], {}), '(_MakeAndIncrementCounters)\n', (11350, 11377), True, 'import apache_beam as beam\n'), ((12262, 12291), 'apache_beam.Map', 'beam.Map', (['(lambda x: (None, x))'], {}), '(lambda x: (None, x))\n', (12270, 12291), True, 'import apache_beam as beam\n'), ((17568, 17629), 'apache_beam.Map', 'beam.Map', (['EncodeTFDV'], {'feature_specs': 'feature_specs_from_schema'}), '(EncodeTFDV, feature_specs=feature_specs_from_schema)\n', (17576, 17629), True, 'import apache_beam as beam\n'), ((17879, 17918), 'tensorflow_data_validation.utils.batch_util.BatchExamplesToArrowTables', 'batch_util.BatchExamplesToArrowTables', ([], {}), '()\n', (17916, 17918), False, 'from tensorflow_data_validation.utils import batch_util\n'), ((20234, 20258), 'apache_beam.Map', 'beam.Map', (['decode_example'], {}), '(decode_example)\n', (20242, 20258), True, 'import apache_beam as beam\n'), ((20825, 20888), 'tensorflow_transform.coders.ExampleProtoCoder', 'tft.coders.ExampleProtoCoder', (['metadata.schema'], {'serialized': '(False)'}), '(metadata.schema, serialized=False)\n', (20853, 20888), True, 'import tensorflow_transform as tft\n'), ((24080, 24127), 'tfx.components.transform.common.GetSoleValue', 'common.GetSoleValue', (['inputs', 'labels.MODULE_FILE'], {}), '(inputs, labels.MODULE_FILE)\n', (24099, 24127), False, 'from tfx.components.transform import common\n'), ((24184, 24236), 'tfx.components.transform.common.GetSoleValue', 'common.GetSoleValue', (['inputs', 
'labels.PREPROCESSING_FN'], {}), '(inputs, labels.PREPROCESSING_FN)\n', (24203, 24236), False, 'from tfx.components.transform import common\n'), ((28353, 28566), 'tensorflow.logging.warning', 'tf.logging.warning', (['"""Using the in-place Transform since compute_statistics=False, it does not materialize transformed data, and the configured preprocessing_fn appears to not require analyzing the data."""'], {}), "(\n 'Using the in-place Transform since compute_statistics=False, it does not materialize transformed data, and the configured preprocessing_fn appears to not require analyzing the data.'\n )\n", (28371, 28566), True, 'import tensorflow as tf\n'), ((32692, 32799), 'tensorflow_transform.tf_metadata.dataset_schema.from_feature_spec', 'dataset_schema.from_feature_spec', (['{feature: feature_spec[feature] for feature in analyze_input_columns}'], {}), '({feature: feature_spec[feature] for\n feature in analyze_input_columns})\n', (32724, 32799), False, 'from tensorflow_transform.tf_metadata import dataset_schema\n'), ((32912, 33021), 'tensorflow_transform.tf_metadata.dataset_schema.from_feature_spec', 'dataset_schema.from_feature_spec', (['{feature: feature_spec[feature] for feature in transform_input_columns}'], {}), '({feature: feature_spec[feature] for\n feature in transform_input_columns})\n', (32944, 33021), False, 'from tensorflow_transform.tf_metadata import dataset_schema\n'), ((33803, 33975), 'tensorflow_transform.beam.Context', 'tft_beam.Context', ([], {'temp_dir': 'temp_path', 'desired_batch_size': 'desired_batch_size', 'passthrough_keys': '{_TRANSFORM_INTERNAL_FEATURE_FOR_KEY}', 'use_deep_copy_optimization': '(True)'}), '(temp_dir=temp_path, desired_batch_size=desired_batch_size,\n passthrough_keys={_TRANSFORM_INTERNAL_FEATURE_FOR_KEY},\n use_deep_copy_optimization=True)\n', (33819, 33975), True, 'import tensorflow_transform.beam as tft_beam\n'), ((35819, 35855), 'six.iteritems', 'six.iteritems', (['new_analyze_data_dict'], {}), 
'(new_analyze_data_dict)\n', (35832, 35855), False, 'import six\n'), ((43931, 43954), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph'}), '(graph=graph)\n', (43941, 43954), True, 'import tensorflow as tf\n'), ((44748, 44789), 'tensorflow_transform.impl_helper.copy_tensors', 'impl_helper.copy_tensors', (['input_signature'], {}), '(input_signature)\n', (44772, 44789), False, 'from tensorflow_transform import impl_helper\n'), ((44972, 45047), 'os.path.join', 'os.path.join', (['transform_output_path', 'tft.TFTransformOutput.TRANSFORM_FN_DIR'], {}), '(transform_output_path, tft.TFTransformOutput.TRANSFORM_FN_DIR)\n', (44984, 45047), False, 'import os\n'), ((45097, 45214), 'tensorflow_transform.saved.saved_transform_io.write_saved_transform_from_session', 'saved_transform_io.write_saved_transform_from_session', (['sess', 'input_signature', 'output_signature', 'transform_fn_path'], {}), '(sess, input_signature,\n output_signature, transform_fn_path)\n', (45150, 45214), False, 'from tensorflow_transform.saved import saved_transform_io\n'), ((48908, 49181), 'tensorflow.logging.warning', 'tf.logging.warning', (['"""TFX Transform doesn\'t officially support tf.SequenceExample, follow b/38235367 to track official support progress. We do not guarantee not to break your pipeline if you use Transform with a tf.SequenceExample data type. Use at your own risk."""'], {}), '(\n "TFX Transform doesn\'t officially support tf.SequenceExample, follow b/38235367 to track official support progress. We do not guarantee not to break your pipeline if you use Transform with a tf.SequenceExample data type. 
Use at your own risk."\n )\n', (48926, 49181), True, 'import tensorflow as tf\n'), ((10781, 10871), 'apache_beam.metrics.Metrics.counter', 'beam.metrics.Metrics.counter', (['tft_beam_common.METRICS_NAMESPACE', '"""total_columns_count"""'], {}), "(tft_beam_common.METRICS_NAMESPACE,\n 'total_columns_count')\n", (10809, 10871), True, 'import apache_beam as beam\n'), ((10920, 11012), 'apache_beam.metrics.Metrics.counter', 'beam.metrics.Metrics.counter', (['tft_beam_common.METRICS_NAMESPACE', '"""analyze_columns_count"""'], {}), "(tft_beam_common.METRICS_NAMESPACE,\n 'analyze_columns_count')\n", (10948, 11012), True, 'import apache_beam as beam\n'), ((11063, 11157), 'apache_beam.metrics.Metrics.counter', 'beam.metrics.Metrics.counter', (['tft_beam_common.METRICS_NAMESPACE', '"""transform_columns_count"""'], {}), "(tft_beam_common.METRICS_NAMESPACE,\n 'transform_columns_count')\n", (11091, 11157), True, 'import apache_beam as beam\n'), ((11279, 11298), 'apache_beam.Create', 'beam.Create', (['[None]'], {}), '([None])\n', (11290, 11298), True, 'import apache_beam as beam\n'), ((13240, 13253), 'apache_beam.Values', 'beam.Values', ([], {}), '()\n', (13251, 13253), True, 'import apache_beam as beam\n'), ((18029, 18061), 'tensorflow_data_validation.StatsOptions', 'tfdv.StatsOptions', ([], {'schema': 'schema'}), '(schema=schema)\n', (18046, 18061), True, 'import tensorflow_data_validation as tfdv\n'), ((36299, 36344), 'tensorflow.logging.info', 'tf.logging.info', (['"""Analyzing data with cache."""'], {}), "('Analyzing data with cache.')\n", (36314, 36344), True, 'import tensorflow as tf\n'), ((37315, 37353), 'tensorflow.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['output_cache_dir'], {}), '(output_cache_dir)\n', (37335, 37353), True, 'import tensorflow as tf\n'), ((37364, 37427), 'tensorflow.logging.info', 'tf.logging.info', (['"""Using existing cache in: %s"""', 'input_cache_dir'], {}), "('Using existing cache in: %s', input_cache_dir)\n", (37379, 37427), True, 'import 
tensorflow as tf\n'), ((43886, 43896), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (43894, 43896), True, 'import tensorflow as tf\n'), ((44867, 44900), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (44898, 44900), True, 'import tensorflow as tf\n'), ((44919, 44942), 'tensorflow.tables_initializer', 'tf.tables_initializer', ([], {}), '()\n', (44940, 44942), True, 'import tensorflow as tf\n'), ((13408, 13451), 'apache_beam.coders.ProtoCoder', 'beam.coders.ProtoCoder', (['example_pb2.Example'], {}), '(example_pb2.Example)\n', (13430, 13451), True, 'import apache_beam as beam\n'), ((17167, 17227), 'numpy.asarray', 'np.asarray', (['feature_value', 'feature_spec.dtype.as_numpy_dtype'], {}), '(feature_value, feature_spec.dtype.as_numpy_dtype)\n', (17177, 17227), True, 'import numpy as np\n'), ((17290, 17358), 'numpy.asarray', 'np.asarray', (['[feature_value]'], {'dtype': 'feature_spec.dtype.as_numpy_dtype'}), '([feature_value], dtype=feature_spec.dtype.as_numpy_dtype)\n', (17300, 17358), True, 'import numpy as np\n'), ((18954, 19021), 'apache_beam.coders.ProtoCoder', 'beam.coders.ProtoCoder', (['statistics_pb2.DatasetFeatureStatisticsList'], {}), '(statistics_pb2.DatasetFeatureStatisticsList)\n', (18976, 19021), True, 'import apache_beam as beam\n'), ((36528, 36590), 'tensorflow_transform.beam.AnalyzeDatasetWithCache', 'tft_beam.AnalyzeDatasetWithCache', (['preprocessing_fn'], {'pipeline': 'p'}), '(preprocessing_fn, pipeline=p)\n', (36560, 36590), True, 'import tensorflow_transform.beam as tft_beam\n'), ((37111, 37159), 'tensorflow_transform.beam.WriteTransformFn', 'tft_beam.WriteTransformFn', (['transform_output_path'], {}), '(transform_output_path)\n', (37136, 37159), True, 'import tensorflow_transform.beam as tft_beam\n'), ((38646, 38742), 'os.path.join', 'os.path.join', (['transform_output_path', 'tft.TFTransformOutput.PRE_TRANSFORM_FEATURE_STATS_PATH'], {}), '(transform_output_path, tft.TFTransformOutput.\n 
PRE_TRANSFORM_FEATURE_STATS_PATH)\n', (38658, 38742), False, 'import os\n'), ((40911, 41008), 'os.path.join', 'os.path.join', (['transform_output_path', 'tft.TFTransformOutput.POST_TRANSFORM_FEATURE_STATS_PATH'], {}), '(transform_output_path, tft.TFTransformOutput.\n POST_TRANSFORM_FEATURE_STATS_PATH)\n', (40923, 41008), False, 'import os\n'), ((45309, 45381), 'tensorflow_transform.schema_inference.infer_feature_schema', 'tft.schema_inference.infer_feature_schema', (['output_signature', 'graph', 'sess'], {}), '(output_signature, graph, sess)\n', (45350, 45381), True, 'import tensorflow_transform as tft\n'), ((12120, 12144), 'apache_beam.coders.BytesCoder', 'beam.coders.BytesCoder', ([], {}), '()\n', (12142, 12144), True, 'import apache_beam as beam\n'), ((12434, 12471), 'tensorflow.core.example.example_pb2.Example.FromString', 'example_pb2.Example.FromString', (['kv[1]'], {}), '(kv[1])\n', (12464, 12471), False, 'from tensorflow.core.example import example_pb2\n'), ((36183, 36207), 'apache_beam.Flatten', 'beam.Flatten', ([], {'pipeline': 'p'}), '(pipeline=p)\n', (36195, 36207), True, 'import apache_beam as beam\n'), ((36749, 36824), 'os.path.join', 'os.path.join', (['transform_output_path', 'tft.TFTransformOutput.RAW_METADATA_DIR'], {}), '(transform_output_path, tft.TFTransformOutput.RAW_METADATA_DIR)\n', (36761, 36824), False, 'import os\n'), ((37786, 37831), 'os.path.join', 'os.path.join', (['input_cache_dir', 'span_cache_dir'], {}), '(input_cache_dir, span_cache_dir)\n', (37798, 37831), False, 'import os\n'), ((37898, 37936), 'tensorflow.io.gfile.isdir', 'tf.io.gfile.isdir', (['full_span_cache_dir'], {}), '(full_span_cache_dir)\n', (37915, 37936), True, 'import tensorflow as tf\n'), ((40411, 40438), 'tensorflow_transform.beam.TransformDataset', 'tft_beam.TransformDataset', ([], {}), '()\n', (40436, 40438), True, 'import tensorflow_transform.beam as tft_beam\n'), ((38023, 38069), 'os.path.join', 'os.path.join', (['output_cache_dir', 'span_cache_dir'], {}), 
'(output_cache_dir, span_cache_dir)\n', (38035, 38069), False, 'import os\n'), ((39052, 39076), 'apache_beam.Flatten', 'beam.Flatten', ([], {'pipeline': 'p'}), '(pipeline=p)\n', (39064, 39076), True, 'import apache_beam as beam\n'), ((41537, 41551), 'apache_beam.Flatten', 'beam.Flatten', ([], {}), '()\n', (41549, 41551), True, 'import apache_beam as beam\n')] |
import abc
from typing import Dict
import numpy as np
from algorithms.abstract_state import AbstractState, AbstractMove
from game.development_cards import DevelopmentCard
from game.pieces import *
from game.resource import Resource
class AbstractPlayer(abc.ABC):
    """Base class for a Catan player.

    Tracks the player's resource histogram, remaining building pieces and
    development cards (hidden vs. played), and declares the decision
    interface (:meth:`choose_move`, :meth:`choose_resources_to_drop`)
    that concrete players must implement.
    """

    # Class-level counter used to perturb per-player seeds so that two
    # players constructed with the same seed do not share an identical
    # random stream.
    c = 1

    def __init__(self, seed: int = None, timeout_seconds=5):
        """
        :param seed: optional positive int to seed this player's RNG.
            The seed is scaled by a per-instance counter so players built
            with the same seed still diverge.
        :param timeout_seconds: time budget (seconds) for a move decision
        """
        assert seed is None or (isinstance(seed, int) and seed > 0)
        AbstractPlayer.c += 1
        seed = seed if seed is None else int(seed * AbstractPlayer.c)
        self._random_choice = np.random.RandomState(seed).choice
        self._timeout_seconds = timeout_seconds
        # resource histogram: Resource -> amount currently held
        self.resources = {r: 0 for r in Resource}
        # building pieces still available to place on the board
        self.pieces = {
            Colony.Settlement: 5,
            Colony.City: 4,
            Road.Paved: 15
        }
        # development-card histograms: purchased-but-hidden vs. played cards
        self.unexposed_development_cards = {card: 0 for card in DevelopmentCard}
        self.exposed_development_cards = {card: 0 for card in DevelopmentCard}

    @abc.abstractmethod
    def choose_move(self, state: AbstractState) -> AbstractMove:
        """
        Implement decision mechanism here
        :param state: Game state to help decide on a move
        :return: Selected AbstractMove to be made
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def choose_resources_to_drop(self) -> Dict[Resource, int]:
        """
        Implement here decision which resources to drop when the dice roll 7
        :param: state: Game state to help decide on a move
        :return: Dict[Resource, int] from resources to the number of resources to drop
        """
        raise NotImplementedError()

    def add_resource(self, resource_type: Resource, how_many: int = 1):
        """
        As the name implies
        :param resource_type: Brick, Lumber, Wool, Grain, Ore
        :param how_many: number of resource units to add
        :return: None
        """
        self.resources[resource_type] += how_many

    def remove_resource(self, resource_type: Resource, how_many: int = 1):
        """
        As the name implies
        :param resource_type: Brick, Lumber, Wool, Grain, Ore, Desert
        :param how_many: number of resource units to remove
        :return: None
        """
        # implemented as a negative addition so both paths share one code-path
        self.add_resource(resource_type, -how_many)

    @staticmethod
    def update_players_resources(resources_amounts_by_players, update_method):
        """
        apply 'update_method' on each player with his resources histogram
        :param resources_amounts_by_players: Dict[AbstractPlayer, Dict[Resource, int]]
        :param update_method: AbstractPlayer.add_resource / AbstractPlayer.remove_resource
        :return: None
        """
        for player, resources_to_amount in resources_amounts_by_players.items():
            player.update_resources(resources_to_amount, update_method)

    def update_resources(self, resources_amount: Dict[Resource, int], update_method):
        """
        update resources according to given histogram, with given method
        :param resources_amount: dictionary of the amounts of resources
        :param update_method: add/remove/anything you may imagine.
        i.e AbstractPlayer.add_resource/AbstractPlayer.add_resource
        :return: None
        """
        for resource, amount in resources_amount.items():
            update_method(self, resource, amount)

    def get_resource_count(self, resource_type: Resource) -> int:
        """
        As the name implies
        :param resource_type: Brick, Lumber, Wool, Grain, Ore, Desert
        :return: the number of resource units the player has
        """
        return self.resources[resource_type]

    def add_unexposed_development_card(self, card: DevelopmentCard):
        """
        increase by 1 the count of the development card 'card'
        :param card: the (probably) purchased development card
        :return: None
        """
        self.unexposed_development_cards[card] += 1

    def remove_unexposed_development_card(self, card: DevelopmentCard):
        """
        revert the side effects of 'add_unexposed_development_card' method
        :param card: the (probably) purchased development card to be "un-purchased"
        :return: None
        """
        self.unexposed_development_cards[card] -= 1

    def expose_development_card(self, card: DevelopmentCard):
        """
        only counts the number of exposed/unexposed cards!
        card effect not applied!
        :param card: the exposed development card
        :return: None
        """
        assert self.unexposed_development_cards[card] >= 1
        self.unexposed_development_cards[card] -= 1
        self.exposed_development_cards[card] += 1

    def un_expose_development_card(self, card: DevelopmentCard):
        """
        only counts the number of exposed/unexposed cards!
        card effect not reverted!
        :param card: the exposed development card to be un-exposed
        :return: None
        """
        assert self.exposed_development_cards[card] >= 1
        self.unexposed_development_cards[card] += 1
        self.exposed_development_cards[card] -= 1

    def get_unexposed_development_cards(self) -> Dict[DevelopmentCard, int]:
        """
        get the histogram of development cards purchased but not yet played
        :return: Dict[DevelopmentCard, int] from card type to amount held
        """
        return self.unexposed_development_cards

    def get_exposed_knights_count(self) -> int:
        """
        get the number of times this player used a "knight" development-card
        :return: int, the number of times "knight" card was used by the player
        """
        return self.exposed_development_cards[DevelopmentCard.Knight]

    def get_victory_point_development_cards_count(self) -> int:
        """
        get the number of "victory points" development-card the player has
        :return: int, the number of times "victory points" development-card the player has
        """
        return self.unexposed_development_cards[DevelopmentCard.VictoryPoint]

    def has_unexposed_development_card(self) -> bool:
        """
        indicate whether there is an unexposed development card
        victory point cards are not checked - they are never exposed
        :return: True if there is an unexposed development card, False otherwise
        """
        return any(
            c != DevelopmentCard.VictoryPoint and self.unexposed_development_cards[c] != 0
            for c in DevelopmentCard
        )

    def can_pave_road(self) -> bool:
        """
        indicate whether there are enough resources to pave a road
        :return: True if enough resources to pave a road, False otherwise
        """
        return (self.resources[Resource.Brick] >= 1 and
                self.resources[Resource.Lumber] >= 1 and
                self.pieces[Road.Paved] > 0)

    def amount_of_roads_can_afford(self) -> int:
        """
        :return: int, number of roads affordable with current resources/pieces
        """
        return min(self.resources[Resource.Brick],
                   self.resources[Resource.Lumber],
                   self.pieces[Road.Paved])

    def can_settle_settlement(self) -> bool:
        """
        indicate whether there are enough resources to build a settlement
        :return: True if enough resources to build a settlement, False otherwise
        """
        return (self.resources[Resource.Brick] >= 1 and
                self.resources[Resource.Lumber] >= 1 and
                self.resources[Resource.Wool] >= 1 and
                self.resources[Resource.Grain] >= 1 and
                self.pieces[Colony.Settlement] > 0)

    def amount_of_settlements_can_afford(self) -> int:
        """
        :return: int, number of settlements affordable with current resources/pieces
        """
        return min(self.pieces[Colony.Settlement],
                   self.resources[Resource.Brick],
                   self.resources[Resource.Lumber],
                   self.resources[Resource.Wool],
                   self.resources[Resource.Grain])

    def can_settle_city(self) -> bool:
        """
        indicate whether there are enough resources to build a city
        :return: True if enough resources to build a city, False otherwise
        """
        return (self.resources[Resource.Ore] >= 3 and
                self.resources[Resource.Grain] >= 2 and
                self.pieces[Colony.City] > 0)

    def amount_of_cities_can_afford(self) -> int:
        """
        :return: int, number of cities affordable with current resources/pieces
        """
        return min(int(self.resources[Resource.Ore] / 3),
                   int(self.resources[Resource.Grain] / 2),
                   self.pieces[Colony.City])

    def has_resources_for_development_card(self) -> bool:
        """
        indicate whether there are enough resources to buy a development card
        NOTE: unlike can_* methods, this method doesn't check there are needed
        pieces (in this case develpoment-cards in the deck)
        :return: True if enough resources to buy a development card, False otherwise
        """
        return (self.resources[Resource.Ore] >= 1 and
                self.resources[Resource.Wool] >= 1 and
                self.resources[Resource.Grain] >= 1)

    def remove_resources_and_piece_for_road(self):
        """pay the road cost (1 brick, 1 lumber) and consume a road piece"""
        assert self.can_pave_road()
        self.remove_resource(Resource.Brick)
        self.remove_resource(Resource.Lumber)
        self.pieces[Road.Paved] -= 1

    def remove_resources_and_piece_for_settlement(self):
        """pay the settlement cost (brick, lumber, wool, grain) and consume a piece"""
        assert self.can_settle_settlement()
        self.remove_resource(Resource.Brick)
        self.remove_resource(Resource.Lumber)
        self.remove_resource(Resource.Wool)
        self.remove_resource(Resource.Grain)
        self.pieces[Colony.Settlement] -= 1

    def remove_resources_and_piece_for_city(self):
        """pay the city cost (3 ore, 2 grain) and consume a city piece"""
        assert self.can_settle_city()
        self.remove_resource(Resource.Ore, 3)
        self.remove_resource(Resource.Grain, 2)
        self.pieces[Colony.City] -= 1

    def remove_resources_for_development_card(self):
        """pay the development-card cost (1 ore, 1 wool, 1 grain)"""
        assert self.has_resources_for_development_card()
        self.remove_resource(Resource.Ore)
        self.remove_resource(Resource.Wool)
        self.remove_resource(Resource.Grain)

    def add_resources_and_piece_for_road(self):
        """revert 'remove_resources_and_piece_for_road'"""
        self.add_resource(Resource.Brick)
        self.add_resource(Resource.Lumber)
        self.pieces[Road.Paved] += 1

    def add_resources_and_piece_for_settlement(self):
        """revert 'remove_resources_and_piece_for_settlement'"""
        self.add_resource(Resource.Brick)
        self.add_resource(Resource.Lumber)
        self.add_resource(Resource.Wool)
        self.add_resource(Resource.Grain)
        self.pieces[Colony.Settlement] += 1

    def add_resources_and_piece_for_city(self):
        """revert 'remove_resources_and_piece_for_city'"""
        self.add_resource(Resource.Ore, 3)
        self.add_resource(Resource.Grain, 2)
        self.pieces[Colony.City] += 1

    def add_resources_for_development_card(self):
        """revert 'remove_resources_for_development_card'"""
        self.add_resource(Resource.Ore)
        self.add_resource(Resource.Wool)
        self.add_resource(Resource.Grain)

    def trade_resources(self, source_resource: Resource, target_resource: Resource, count: int, ratio: int):
        """
        exchange 'count * ratio' units of source_resource for 'count' units
        of target_resource (e.g. a 4:1 bank trade)
        :return: None
        """
        self.remove_resource(source_resource, count * ratio)
        self.add_resource(target_resource, count)

    def un_trade_resources(self, source_resource: Resource, target_resource: Resource, count: int, ratio: int):
        """revert the side effects of 'trade_resources'"""
        self.add_resource(source_resource, count * ratio)
        self.remove_resource(target_resource, count)
| [
"numpy.random.RandomState"
] | [((536, 563), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (557, 563), True, 'import numpy as np\n')] |
# ******************************************************************************
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import numpy as np
import pytest
from _pyngraph import PartialShape
import ngraph as ng
import ngraph.opset1 as ng_opset1
from ngraph.impl import Type
from tests import skip_segfault
# Floating-point / integer dtypes exercised by the parametrized shape tests.
np_types = [np.float32, np.int32]

# Every integral numpy dtype supported by the nGraph element types.
integral_np_types = [
    np.int8, np.int16, np.int32, np.int64,
    np.uint8, np.uint16, np.uint32, np.uint64,
]
@pytest.mark.parametrize("dtype", np_types)
def test_binary_convolution(dtype):
    """BinaryConvolution node reports the expected type name and output shape."""
    data = ng.parameter([1, 1, 9, 9], name="Input0", dtype=dtype)
    filters = ng.parameter([1, 1, 3, 3], name="Input1", dtype=dtype)

    node = ng.binary_convolution(
        data,
        filters,
        np.array([1, 1]),   # strides
        np.array([0, 0]),   # pads_begin
        np.array([0, 0]),   # pads_end
        np.array([1, 1]),   # dilations
        "xnor-popcount",    # mode
        0.0,                # pad_value
    )

    assert node.get_type_name() == "BinaryConvolution"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == [1, 1, 7, 7]
@pytest.mark.parametrize("dtype", np_types)
def test_ctc_greedy_decoder(dtype):
    """CTCGreedyDecoder node reports the expected type name and output shape."""
    probabilities = ng.parameter([20, 8, 128], name="Input0", dtype=dtype)
    sequence_masks = ng.parameter([20, 8], name="Input1", dtype=dtype)

    node = ng.ctc_greedy_decoder(probabilities, sequence_masks)

    assert node.get_type_name() == "CTCGreedyDecoder"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == [8, 20, 1, 1]
@pytest.mark.parametrize("dtype", np_types)
def test_deformable_convolution(dtype):
    """DeformableConvolution node reports the expected type name and output shape."""
    data = ng.parameter([1, 1, 9, 9], name="Input0", dtype=dtype)
    offsets = ng.parameter([1, 1, 9, 9], name="Input1", dtype=dtype)
    filters = ng.parameter([1, 1, 3, 3], name="Input2", dtype=dtype)

    node = ng.deformable_convolution(
        data,
        offsets,
        filters,
        np.array([1, 1]),   # strides
        np.array([0, 0]),   # pads_begin
        np.array([0, 0]),   # pads_end
        np.array([1, 1]),   # dilations
    )

    assert node.get_type_name() == "DeformableConvolution"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == [1, 1, 7, 7]
@pytest.mark.parametrize("dtype", np_types)
def test_deformable_psroi_pooling(dtype):
    """DeformablePSROIPooling node reports the expected type name and output shape."""
    feature_maps = ng.parameter([1, 392, 38, 63], name="Input0", dtype=dtype)
    coords = ng.parameter([300, 5], name="Input1", dtype=dtype)
    offsets = ng.parameter([300, 2, 7, 7], name="Input2", dtype=dtype)

    node = ng.deformable_psroi_pooling(
        feature_maps,
        coords,
        8,                      # output_dim
        0.0625,                 # spatial_scale
        7,                      # group_size
        "bilinear_deformable",  # mode
        4,                      # spatial_bins_x
        4,                      # spatial_bins_y
        0.1,                    # trans_std
        7,                      # part_size
        offsets=offsets,
    )

    assert node.get_type_name() == "DeformablePSROIPooling"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == [300, 8, 7, 7]
@pytest.mark.parametrize("dtype", np_types)
def test_floor_mod(dtype):
    """FloorMod broadcasts its inputs and reports the expected output shape."""
    lhs = ng.parameter([8, 1, 6, 1], name="Input0", dtype=dtype)
    rhs = ng.parameter([7, 1, 5], name="Input1", dtype=dtype)

    node = ng.floor_mod(lhs, rhs)

    assert node.get_type_name() == "FloorMod"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == [8, 7, 6, 5]
@pytest.mark.parametrize("dtype", np_types)
def test_gather_tree(dtype):
    """GatherTree node reports the expected type name and output shape."""
    step_ids = ng.parameter([100, 1, 10], name="Input0", dtype=dtype)
    parent_ids = ng.parameter([100, 1, 10], name="Input1", dtype=dtype)
    max_seq_len = ng.parameter([1], name="Input2", dtype=dtype)
    end_token = ng.parameter([], name="Input3", dtype=dtype)

    node = ng.gather_tree(step_ids, parent_ids, max_seq_len, end_token)

    assert node.get_type_name() == "GatherTree"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == [100, 1, 10]
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_lstm_cell_operator(dtype):
    """LSTMCell shape inference, with default and with explicit activation args."""
    batch_size, input_size, hidden_size = 1, 16, 128

    X = ng.parameter([batch_size, input_size], name="X", dtype=dtype)
    H_t = ng.parameter([batch_size, hidden_size], name="H_t", dtype=dtype)
    C_t = ng.parameter([batch_size, hidden_size], name="C_t", dtype=dtype)
    W = ng.parameter([4 * hidden_size, input_size], name="W", dtype=dtype)
    R = ng.parameter([4 * hidden_size, hidden_size], name="R", dtype=dtype)
    B = ng.parameter([4 * hidden_size], name="B", dtype=dtype)

    expected_shape = [1, 128]

    # Default activations/clip.
    node_default = ng.lstm_cell(X, H_t, C_t, W, R, B, hidden_size)
    assert node_default.get_type_name() == "LSTMCell"
    assert node_default.get_output_size() == 2
    assert list(node_default.get_output_shape(0)) == expected_shape
    assert list(node_default.get_output_shape(1)) == expected_shape

    # Explicit activations, alphas, betas and clip.
    node_param = ng.lstm_cell(
        X,
        H_t,
        C_t,
        W,
        R,
        B,
        hidden_size,
        ["tanh", "Sigmoid", "RELU"],  # activations
        [1.0, 2.0, 3.0],              # activation_alpha
        [3.0, 2.0, 1.0],              # activation_beta
        0.5,                          # clip
    )
    assert node_param.get_type_name() == "LSTMCell"
    assert node_param.get_output_size() == 2
    assert list(node_param.get_output_shape(0)) == expected_shape
    assert list(node_param.get_output_shape(1)) == expected_shape
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_lstm_cell_operator_opset1(dtype):
    """Opset1 LSTMCell shape inference, with default and explicit activation args."""
    batch_size, input_size, hidden_size = 1, 16, 128

    X = ng.parameter([batch_size, input_size], name="X", dtype=dtype)
    H_t = ng.parameter([batch_size, hidden_size], name="H_t", dtype=dtype)
    C_t = ng.parameter([batch_size, hidden_size], name="C_t", dtype=dtype)
    W = ng.parameter([4 * hidden_size, input_size], name="W", dtype=dtype)
    R = ng.parameter([4 * hidden_size, hidden_size], name="R", dtype=dtype)
    B = ng.parameter([4 * hidden_size], name="B", dtype=dtype)

    expected_shape = [1, 128]

    # Default activations/clip.
    node_default = ng_opset1.lstm_cell(X, H_t, C_t, W, R, B, hidden_size)
    assert node_default.get_type_name() == "LSTMCell"
    assert node_default.get_output_size() == 2
    assert list(node_default.get_output_shape(0)) == expected_shape
    assert list(node_default.get_output_shape(1)) == expected_shape

    # Explicit activations, alphas, betas and clip.
    node_param = ng_opset1.lstm_cell(
        X,
        H_t,
        C_t,
        W,
        R,
        B,
        hidden_size,
        ["tanh", "Sigmoid", "RELU"],  # activations
        [1.0, 2.0, 3.0],              # activation_alpha
        [3.0, 2.0, 1.0],              # activation_beta
        0.5,                          # clip
    )
    assert node_param.get_type_name() == "LSTMCell"
    assert node_param.get_output_size() == 2
    assert list(node_param.get_output_shape(0)) == expected_shape
    assert list(node_param.get_output_shape(1)) == expected_shape
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_lstm_sequence_operator_bidirectional_opset1(dtype):
    """opset1 LSTMSequence, BIDIRECTIONAL: builds the node with and without
    explicit activations/clip and checks type name and output count (3)."""
    batch_size = 1
    input_size = 16
    hidden_size = 128
    # Bidirectional: one weight set per direction.
    num_directions = 2
    seq_length = 2
    X_shape = [batch_size, seq_length, input_size]
    H_t_shape = [batch_size, num_directions, hidden_size]
    C_t_shape = [batch_size, num_directions, hidden_size]
    seq_len_shape = [batch_size]
    # Factor of 4 for the stacked LSTM gate weights.
    W_shape = [num_directions, 4 * hidden_size, input_size]
    R_shape = [num_directions, 4 * hidden_size, hidden_size]
    B_shape = [num_directions, 4 * hidden_size]
    parameter_X = ng.parameter(X_shape, name="X", dtype=dtype)
    parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype)
    parameter_C_t = ng.parameter(C_t_shape, name="C_t", dtype=dtype)
    parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32)
    parameter_W = ng.parameter(W_shape, name="W", dtype=dtype)
    parameter_R = ng.parameter(R_shape, name="R", dtype=dtype)
    parameter_B = ng.parameter(B_shape, name="B", dtype=dtype)
    direction = "BIDIRECTIONAL"
    node = ng_opset1.lstm_sequence(
        parameter_X,
        parameter_H_t,
        parameter_C_t,
        parameter_seq_len,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
    )
    assert node.get_type_name() == "LSTMSequence"
    assert node.get_output_size() == 3
    # Same construction with explicit activations, alpha/beta and clip.
    activations = ["RELU", "tanh", "Sigmoid"]
    activation_alpha = [1.0, 2.0, 3.0]
    activation_beta = [3.0, 2.0, 1.0]
    clip = 1.22
    node_param = ng_opset1.lstm_sequence(
        parameter_X,
        parameter_H_t,
        parameter_C_t,
        parameter_seq_len,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
        activations,
        activation_alpha,
        activation_beta,
        clip,
    )
    assert node_param.get_type_name() == "LSTMSequence"
    assert node_param.get_output_size() == 3
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_lstm_sequence_operator_reverse_opset1(dtype):
    """opset1 LSTMSequence, REVERSE direction: default and fully-parameterized
    constructions both yield a 3-output LSTMSequence node."""
    batch_size = 2
    input_size = 4
    hidden_size = 3
    num_directions = 1
    seq_length = 2
    X_shape = [batch_size, seq_length, input_size]
    H_t_shape = [batch_size, num_directions, hidden_size]
    C_t_shape = [batch_size, num_directions, hidden_size]
    seq_len_shape = [batch_size]
    # Factor of 4 for the stacked LSTM gate weights.
    W_shape = [num_directions, 4 * hidden_size, input_size]
    R_shape = [num_directions, 4 * hidden_size, hidden_size]
    B_shape = [num_directions, 4 * hidden_size]
    parameter_X = ng.parameter(X_shape, name="X", dtype=dtype)
    parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype)
    parameter_C_t = ng.parameter(C_t_shape, name="C_t", dtype=dtype)
    parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32)
    parameter_W = ng.parameter(W_shape, name="W", dtype=dtype)
    parameter_R = ng.parameter(R_shape, name="R", dtype=dtype)
    parameter_B = ng.parameter(B_shape, name="B", dtype=dtype)
    direction = "REVERSE"
    node_default = ng_opset1.lstm_sequence(
        parameter_X,
        parameter_H_t,
        parameter_C_t,
        parameter_seq_len,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
    )
    assert node_default.get_type_name() == "LSTMSequence"
    assert node_default.get_output_size() == 3
    # Same construction with explicit activations, alpha/beta and clip.
    activations = ["RELU", "tanh", "Sigmoid"]
    activation_alpha = [1.0, 2.0, 3.0]
    activation_beta = [3.0, 2.0, 1.0]
    clip = 1.22
    node_param = ng_opset1.lstm_sequence(
        parameter_X,
        parameter_H_t,
        parameter_C_t,
        parameter_seq_len,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
        activations,
        activation_alpha,
        activation_beta,
        clip,
    )
    assert node_param.get_type_name() == "LSTMSequence"
    assert node_param.get_output_size() == 3
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_lstm_sequence_operator_forward_opset1(dtype):
    """opset1 LSTMSequence, forward direction (lowercase string — presumably
    direction strings are case-insensitive; other tests use uppercase)."""
    batch_size = 2
    input_size = 4
    hidden_size = 3
    num_directions = 1
    seq_length = 2
    X_shape = [batch_size, seq_length, input_size]
    H_t_shape = [batch_size, num_directions, hidden_size]
    C_t_shape = [batch_size, num_directions, hidden_size]
    seq_len_shape = [batch_size]
    # Factor of 4 for the stacked LSTM gate weights.
    W_shape = [num_directions, 4 * hidden_size, input_size]
    R_shape = [num_directions, 4 * hidden_size, hidden_size]
    B_shape = [num_directions, 4 * hidden_size]
    parameter_X = ng.parameter(X_shape, name="X", dtype=dtype)
    parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype)
    parameter_C_t = ng.parameter(C_t_shape, name="C_t", dtype=dtype)
    parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32)
    parameter_W = ng.parameter(W_shape, name="W", dtype=dtype)
    parameter_R = ng.parameter(R_shape, name="R", dtype=dtype)
    parameter_B = ng.parameter(B_shape, name="B", dtype=dtype)
    direction = "forward"
    node_default = ng_opset1.lstm_sequence(
        parameter_X,
        parameter_H_t,
        parameter_C_t,
        parameter_seq_len,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
    )
    assert node_default.get_type_name() == "LSTMSequence"
    assert node_default.get_output_size() == 3
    # Same construction with explicit activations, alpha/beta and clip.
    activations = ["RELU", "tanh", "Sigmoid"]
    activation_alpha = [2.0]
    activation_beta = [1.0]
    clip = 0.5
    node = ng_opset1.lstm_sequence(
        parameter_X,
        parameter_H_t,
        parameter_C_t,
        parameter_seq_len,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
        activations,
        activation_alpha,
        activation_beta,
        clip,
    )
    assert node.get_type_name() == "LSTMSequence"
    assert node.get_output_size() == 3
def test_gru_cell_operator():
    """GRUCell: default construction and construction with explicit
    activations/clip/linear_before_reset each yield one output of shape
    [batch_size, hidden_size]."""
    batch_size = 1
    input_size = 16
    hidden_size = 128
    X_shape = [batch_size, input_size]
    H_t_shape = [batch_size, hidden_size]
    # GRU stacks weights for 3 gates, hence the factor of 3.
    W_shape = [3 * hidden_size, input_size]
    R_shape = [3 * hidden_size, hidden_size]
    B_shape = [3 * hidden_size]
    parameter_X = ng.parameter(X_shape, name="X", dtype=np.float32)
    parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=np.float32)
    parameter_W = ng.parameter(W_shape, name="W", dtype=np.float32)
    parameter_R = ng.parameter(R_shape, name="R", dtype=np.float32)
    parameter_B = ng.parameter(B_shape, name="B", dtype=np.float32)
    expected_shape = [1, 128]
    node_default = ng.gru_cell(parameter_X, parameter_H_t, parameter_W, parameter_R, parameter_B, hidden_size)
    assert node_default.get_type_name() == "GRUCell"
    assert node_default.get_output_size() == 1
    assert list(node_default.get_output_shape(0)) == expected_shape
    activations = ["tanh", "relu"]
    activations_alpha = [1.0, 2.0]
    activations_beta = [1.0, 2.0]
    clip = 0.5
    linear_before_reset = True
    # If *linear_before_reset* is set True, then B tensor shape must be [4 * hidden_size]
    B_shape = [4 * hidden_size]
    parameter_B = ng.parameter(B_shape, name="B", dtype=np.float32)
    node_param = ng.gru_cell(
        parameter_X,
        parameter_H_t,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        activations,
        activations_alpha,
        activations_beta,
        clip,
        linear_before_reset,
    )
    assert node_param.get_type_name() == "GRUCell"
    assert node_param.get_output_size() == 1
    assert list(node_param.get_output_shape(0)) == expected_shape
def test_gru_sequence():
    """GRUSequence (FORWARD): two outputs — the full sequence of hidden states
    [batch, dirs, seq_len, hidden] and the final hidden state
    [batch, dirs, hidden] — for both default and parameterized constructions."""
    batch_size = 2
    input_size = 16
    hidden_size = 32
    seq_len = 8
    seq_lengths = [seq_len] * batch_size
    num_directions = 1
    direction = "FORWARD"
    X_shape = [batch_size, seq_len, input_size]
    H_t_shape = [batch_size, num_directions, hidden_size]
    # GRU stacks weights for 3 gates, hence the factor of 3.
    W_shape = [num_directions, 3 * hidden_size, input_size]
    R_shape = [num_directions, 3 * hidden_size, hidden_size]
    B_shape = [num_directions, 3 * hidden_size]
    parameter_X = ng.parameter(X_shape, name="X", dtype=np.float32)
    parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=np.float32)
    parameter_W = ng.parameter(W_shape, name="W", dtype=np.float32)
    parameter_R = ng.parameter(R_shape, name="R", dtype=np.float32)
    parameter_B = ng.parameter(B_shape, name="B", dtype=np.float32)
    expected_shape_y = [batch_size, num_directions, seq_len, hidden_size]
    expected_shape_h = [batch_size, num_directions, hidden_size]
    node_default = ng.gru_sequence(
        parameter_X,
        parameter_H_t,
        seq_lengths,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
    )
    assert node_default.get_type_name() == "GRUSequence"
    assert node_default.get_output_size() == 2
    assert list(node_default.get_output_shape(0)) == expected_shape_y
    assert list(node_default.get_output_shape(1)) == expected_shape_h
    activations = ["tanh", "relu"]
    activations_alpha = [1.0, 2.0]
    activations_beta = [1.0, 2.0]
    clip = 0.5
    linear_before_reset = True
    # If *linear_before_reset* is set True, then B tensor shape must be [num_directions, 4 * hidden_size]
    B_shape = [num_directions, 4 * hidden_size]
    parameter_B = ng.parameter(B_shape, name="B", dtype=np.float32)
    node_param = ng.gru_sequence(
        parameter_X,
        parameter_H_t,
        seq_lengths,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
        activations,
        activations_alpha,
        activations_beta,
        clip,
        linear_before_reset,
    )
    assert node_param.get_type_name() == "GRUSequence"
    assert node_param.get_output_size() == 2
    assert list(node_param.get_output_shape(0)) == expected_shape_y
    assert list(node_param.get_output_shape(1)) == expected_shape_h
def test_rnn_sequence():
    """RNNSequence (FORWARD): two outputs — the sequence of hidden states and
    the final hidden state. Plain RNN has a single gate, so no weight
    stacking factor in W/R/B shapes."""
    batch_size = 2
    input_size = 16
    hidden_size = 32
    seq_len = 8
    seq_lengths = [seq_len] * batch_size
    num_directions = 1
    direction = "FORWARD"
    X_shape = [batch_size, seq_len, input_size]
    H_t_shape = [batch_size, num_directions, hidden_size]
    W_shape = [num_directions, hidden_size, input_size]
    R_shape = [num_directions, hidden_size, hidden_size]
    B_shape = [num_directions, hidden_size]
    parameter_X = ng.parameter(X_shape, name="X", dtype=np.float32)
    parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=np.float32)
    parameter_W = ng.parameter(W_shape, name="W", dtype=np.float32)
    parameter_R = ng.parameter(R_shape, name="R", dtype=np.float32)
    parameter_B = ng.parameter(B_shape, name="B", dtype=np.float32)
    expected_shape_y = [batch_size, num_directions, seq_len, hidden_size]
    expected_shape_h = [batch_size, num_directions, hidden_size]
    node_default = ng.rnn_sequence(
        parameter_X,
        parameter_H_t,
        seq_lengths,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
    )
    assert node_default.get_type_name() == "RNNSequence"
    assert node_default.get_output_size() == 2
    assert list(node_default.get_output_shape(0)) == expected_shape_y
    assert list(node_default.get_output_shape(1)) == expected_shape_h
    # Same construction with explicit activation, alpha/beta and clip.
    activations = ["relu"]
    activations_alpha = [2.0]
    activations_beta = [1.0]
    clip = 0.5
    node_param = ng.rnn_sequence(
        parameter_X,
        parameter_H_t,
        seq_lengths,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
        activations,
        activations_alpha,
        activations_beta,
        clip,
    )
    assert node_param.get_type_name() == "RNNSequence"
    assert node_param.get_output_size() == 2
    assert list(node_param.get_output_shape(0)) == expected_shape_y
    assert list(node_param.get_output_shape(1)) == expected_shape_h
@skip_segfault
def test_loop():
    """Loop node construction from a trip count and a boolean condition."""
    iterations = 8
    keep_going = True
    loop_node = ng.loop(iterations, keep_going)
    assert loop_node.get_type_name() == "Loop"
def test_roi_pooling():
    """ROIPooling over 150 ROIs yields a single [150, 3, 6, 6] f32 output.

    Bug fix: get_output_size() returns the number of node outputs (an int),
    so the original comparison `== [6, 6]` against the pooled spatial shape
    could never be true. ROIPooling has exactly one output, matching every
    sibling test in this file.
    """
    inputs = ng.parameter([2, 3, 4, 5], dtype=np.float32)
    coords = ng.parameter([150, 5], dtype=np.float32)
    node = ng.roi_pooling(inputs, coords, [6, 6], 0.0625, "Max")
    assert node.get_type_name() == "ROIPooling"
    assert node.get_output_size() == 1  # was erroneously compared to [6, 6]
    assert list(node.get_output_shape(0)) == [150, 3, 6, 6]
    assert node.get_output_element_type(0) == Type.f32
def test_psroi_pooling():
    """PSROIPooling produces a single f32 output of shape [150, 2, 6, 6]."""
    feature_maps = ng.parameter([1, 3, 4, 5], dtype=np.float32)
    boxes = ng.parameter([150, 5], dtype=np.float32)
    pool_node = ng.psroi_pooling(feature_maps, boxes, 2, 6, 0.0625, 0, 0, "Avg")
    expected_shape = [150, 2, 6, 6]
    assert pool_node.get_type_name() == "PSROIPooling"
    assert pool_node.get_output_size() == 1
    assert list(pool_node.get_output_shape(0)) == expected_shape
    assert pool_node.get_output_element_type(0) == Type.f32
def test_convert_like():
    """ConvertLike casts the data to the element type of *like* (i8 here)."""
    data = ng.parameter([1, 2, 3, 4], name="data", dtype=np.float32)
    target = ng.constant(1, dtype=np.int8)
    convert_node = ng.convert_like(data, target)
    assert convert_node.get_type_name() == "ConvertLike"
    assert convert_node.get_output_size() == 1
    assert list(convert_node.get_output_shape(0)) == [1, 2, 3, 4]
    assert convert_node.get_output_element_type(0) == Type.i8
def test_bucketize():
    """Bucketize keeps the input shape and emits indices of the requested type."""
    input_data = ng.parameter([4, 3, 2, 1], name="data", dtype=np.float32)
    boundaries = ng.parameter([5], name="buckets", dtype=np.int64)
    bucketize_node = ng.bucketize(input_data, boundaries, "i32")
    assert bucketize_node.get_type_name() == "Bucketize"
    assert bucketize_node.get_output_size() == 1
    assert list(bucketize_node.get_output_shape(0)) == [4, 3, 2, 1]
    assert bucketize_node.get_output_element_type(0) == Type.i32
def test_region_yolo():
    """RegionYolo with do_softmax=False flattens axes 0..3 of the detection map."""
    feature_map = ng.parameter([1, 125, 13, 13], name="input", dtype=np.float32)
    num_coords = 4
    num_classes = 80
    num_regions = 1
    mask = [6, 7, 8]
    yolo_node = ng.region_yolo(feature_map, num_coords, num_classes, num_regions, False, mask, 0, 3)
    assert yolo_node.get_type_name() == "RegionYolo"
    assert yolo_node.get_output_size() == 1
    # (classes + coords + 1) * len(mask) = (80 + 4 + 1) * 3 = 255 channels.
    assert list(yolo_node.get_output_shape(0)) == [1, 255, 13, 13]
    assert yolo_node.get_output_element_type(0) == Type.f32
def test_reorg_yolo():
    """ReorgYolo with stride 2 moves 2x2 spatial blocks into channels."""
    feature_map = ng.parameter([2, 24, 34, 62], name="input", dtype=np.int32)
    reorg_node = ng.reorg_yolo(feature_map, [2])
    assert reorg_node.get_type_name() == "ReorgYolo"
    assert reorg_node.get_output_size() == 1
    # Channels grow by stride^2 (24 * 4 = 96); spatial dims halve (34->17, 62->31).
    assert list(reorg_node.get_output_shape(0)) == [2, 96, 17, 31]
    assert reorg_node.get_output_element_type(0) == Type.i32
def test_embedding_bag_offsets_sum_1():
    """EmbeddingBagOffsetsSum with a default index yields one row per offset."""
    table = ng.parameter([5, 2], name="emb_table", dtype=np.float32)
    idx = ng.parameter([4], name="indices", dtype=np.int64)
    offs = ng.parameter([3], name="offsets", dtype=np.int64)
    default_idx = ng.parameter([], name="default_index", dtype=np.int64)
    bag_node = ng.embedding_bag_offsets_sum(table, idx, offs, default_idx)
    assert bag_node.get_type_name() == "EmbeddingBagOffsetsSum"
    assert bag_node.get_output_size() == 1
    assert list(bag_node.get_output_shape(0)) == [3, 2]
    assert bag_node.get_output_element_type(0) == Type.f32
def test_embedding_segments_sum_all_inputs():
    """EmbeddingSegmentsSum with every optional input supplied yields a
    dynamic-row f32 output of partial shape [-1, 2]."""
    table = ng.parameter([5, 2], name="emb_table", dtype=np.float32)
    idx = ng.parameter([4], name="indices", dtype=np.int64)
    seg_ids = ng.parameter([4], name="segment_ids", dtype=np.int64)
    n_segments = ng.parameter([], name="num_segments", dtype=np.int64)
    default_idx = ng.parameter([], name="default_index", dtype=np.int64)
    weights = ng.parameter([4], name="per_sample_weights", dtype=np.float32)
    seg_node = ng.embedding_segments_sum(
        table, idx, seg_ids, n_segments, default_idx, weights
    )
    assert seg_node.get_type_name() == "EmbeddingSegmentsSum"
    assert seg_node.get_output_size() == 1
    assert seg_node.get_output_partial_shape(0).same_scheme(PartialShape([-1, 2]))
    assert seg_node.get_output_element_type(0) == Type.f32
def test_embedding_segments_sum_with_some_opt_inputs():
    """EmbeddingSegmentsSum also accepts only some of its optional inputs."""
    table = ng.parameter([5, 2], name="emb_table", dtype=np.float32)
    idx = ng.parameter([4], name="indices", dtype=np.int64)
    seg_ids = ng.parameter([4], name="segment_ids", dtype=np.int64)
    n_segments = ng.parameter([], name="num_segments", dtype=np.int64)
    # Only num_segments of the three optional inputs is supplied here.
    seg_node = ng.embedding_segments_sum(table, idx, seg_ids, n_segments)
    assert seg_node.get_type_name() == "EmbeddingSegmentsSum"
    assert seg_node.get_output_size() == 1
    assert seg_node.get_output_partial_shape(0).same_scheme(PartialShape([-1, 2]))
    assert seg_node.get_output_element_type(0) == Type.f32
def test_embedding_bag_packed_sum():
    """EmbeddingBagPackedSum sums embeddings for each row of *indices*."""
    emb_table = ng.parameter([5, 2], name="emb_table", dtype=np.float32)
    indices = ng.parameter([3, 3], name="indices", dtype=np.int64)
    per_sample_weights = ng.parameter([3, 3], name="per_sample_weights", dtype=np.float32)
    # per_sample_weights is this op's single optional input and it is supplied here
    # (the original "1 out of 3 optional inputs" comment was copy-pasted from the
    # EmbeddingSegmentsSum tests above).
    node = ng.embedding_bag_packed_sum(emb_table, indices, per_sample_weights)
    assert node.get_type_name() == "EmbeddingBagPackedSum"
    assert node.get_output_size() == 1
    assert list(node.get_output_shape(0)) == [3, 2]
    assert node.get_output_element_type(0) == Type.f32
@pytest.mark.parametrize("dtype", integral_np_types)
def test_interpolate(dtype):
    """Interpolate over axes 2 and 3 resizes the spatial dims to 64x64."""
    image = ng.parameter([1, 3, 1024, 1024], dtype, name="Image")
    attrs = {
        "axes": [2, 3],
        "mode": "cubic",
        "pads_begin": np.array([2, 2], dtype=dtype),
    }
    interp_node = ng.interpolate(image, [64, 64], attrs)
    assert interp_node.get_type_name() == "Interpolate"
    assert interp_node.get_output_size() == 1
    assert list(interp_node.get_output_shape(0)) == [1, 3, 64, 64]
@pytest.mark.parametrize(
    "int_dtype, fp_dtype",
    [
        (np.int8, np.float32),
        (np.int16, np.float32),
        (np.int32, np.float32),
        (np.int64, np.float32),
        (np.uint8, np.float32),
        (np.uint16, np.float32),
        (np.uint32, np.float32),
        (np.uint64, np.float32),
        (np.int32, np.float16),
        (np.int32, np.float64),
    ],
)
def test_prior_box(int_dtype, fp_dtype):
    """PriorBox over a 32x32 grid on a 64x64 image yields a [2, 20480] output."""
    image_dims = np.array([64, 64], dtype=int_dtype)
    attrs = {
        "offset": fp_dtype(0),
        "min_size": np.array([2, 3], dtype=fp_dtype),
        "aspect_ratio": np.array([1.5, 2.0, 2.5], dtype=fp_dtype),
        "scale_all_sizes": False,
    }
    grid = ng.constant(np.array([32, 32], dtype=int_dtype), int_dtype)
    prior_node = ng.prior_box(grid, image_dims, attrs)
    assert prior_node.get_type_name() == "PriorBox"
    assert prior_node.get_output_size() == 1
    assert list(prior_node.get_output_shape(0)) == [2, 20480]
@pytest.mark.parametrize(
    "int_dtype, fp_dtype",
    [
        (np.int8, np.float32),
        (np.int16, np.float32),
        (np.int32, np.float32),
        (np.int64, np.float32),
        (np.uint8, np.float32),
        (np.uint16, np.float32),
        (np.uint32, np.float32),
        (np.uint64, np.float32),
        (np.int32, np.float16),
        (np.int32, np.float64),
    ],
)
def test_prior_box_clustered(int_dtype, fp_dtype):
    """PriorBoxClustered over a 19x19 grid with 3 box sizes: 19*19*3*4 = 4332."""
    img_dims = np.array([64, 64], dtype=int_dtype)
    attrs = {
        "offset": fp_dtype(0.5),
        "width": np.array([4.0, 2.0, 3.2], dtype=fp_dtype),
        "height": np.array([1.0, 2.0, 1.0], dtype=fp_dtype),
    }
    grid = ng.constant(np.array([19, 19], dtype=int_dtype), int_dtype)
    pb_node = ng.prior_box_clustered(grid, img_dims, attrs)
    assert pb_node.get_type_name() == "PriorBoxClustered"
    assert pb_node.get_output_size() == 1
    assert list(pb_node.get_output_shape(0)) == [2, 4332]
@pytest.mark.parametrize(
    "int_dtype, fp_dtype",
    [
        (np.int8, np.float32),
        (np.int16, np.float32),
        (np.int32, np.float32),
        (np.int64, np.float32),
        (np.uint8, np.float32),
        (np.uint16, np.float32),
        (np.uint32, np.float32),
        (np.uint64, np.float32),
        (np.int32, np.float16),
        (np.int32, np.float64),
    ],
)
def test_detection_output(int_dtype, fp_dtype):
    """DetectionOutput with the two auxiliary inputs emits a [1, 1, N, 7] tensor."""
    attrs = {
        "num_classes": int_dtype(85),
        "keep_top_k": np.array([64], dtype=int_dtype),
        "nms_threshold": fp_dtype(0.645),
    }
    logits = ng.parameter([4, 1, 5, 5], fp_dtype, "box_logits")
    scores = ng.parameter([2, 1, 4, 5], fp_dtype, "class_preds")
    prior_boxes = ng.parameter([2, 1, 4, 5], fp_dtype, "proposals")
    aux_scores = ng.parameter([2, 1, 4, 5], fp_dtype, "aux_class_preds")
    aux_boxes = ng.parameter([2, 1, 4, 5], fp_dtype, "aux_box_preds")
    det_node = ng.detection_output(logits, scores, prior_boxes, attrs, aux_scores, aux_boxes)
    assert det_node.get_type_name() == "DetectionOutput"
    assert det_node.get_output_size() == 1
    assert list(det_node.get_output_shape(0)) == [1, 1, 256, 7]
@pytest.mark.parametrize(
    "int_dtype, fp_dtype",
    [
        (np.uint8, np.float32),
        (np.uint16, np.float32),
        (np.uint32, np.float32),
        (np.uint64, np.float32),
        (np.uint32, np.float16),
        (np.uint32, np.float64),
    ],
)
def test_proposal(int_dtype, fp_dtype):
    """Proposal emits post_nms_topn boxes per batch item in its first output."""
    attrs = {
        "base_size": int_dtype(1),
        "pre_nms_topn": int_dtype(20),
        "post_nms_topn": int_dtype(64),
        "nms_thresh": fp_dtype(0.34),
        "feat_stride": int_dtype(16),
        "min_size": int_dtype(32),
        "ratio": np.array([0.1, 1.5, 2.0, 2.5], dtype=fp_dtype),
        "scale": np.array([2, 3, 3, 4], dtype=fp_dtype),
    }
    n_batches = 7
    probs = ng.parameter([n_batches, 12, 34, 62], fp_dtype, "class_probs")
    deltas = ng.parameter([n_batches, 24, 34, 62], fp_dtype, "bbox_deltas")
    img_shape = ng.parameter([3], fp_dtype, "image_shape")
    proposal_node = ng.proposal(probs, deltas, img_shape, attrs)
    assert proposal_node.get_type_name() == "Proposal"
    assert proposal_node.get_output_size() == 2
    assert list(proposal_node.get_output_shape(0)) == [n_batches * attrs["post_nms_topn"], 5]
def test_tensor_iterator():
    """TensorIterator computing a cumulative moving average over 16 steps.

    The body takes (timestep, data slice, previous CMA, constant one) and
    produces the updated CMA plus an unsqueezed copy for concatenation.
    The port-map descriptors below wire TI inputs/outputs to body ports.
    """
    from ngraph.utils.tensor_iterator_types import (
        GraphBody,
        TensorIteratorSliceInputDesc,
        TensorIteratorMergedInputDesc,
        TensorIteratorInvariantInputDesc,
        TensorIteratorBodyOutputDesc,
        TensorIteratorConcatOutputDesc,
    )
    # Body parameters
    body_timestep = ng.parameter([], np.int32, "timestep")
    body_data_in = ng.parameter([1, 2, 2], np.float32, "body_in")
    body_prev_cma = ng.parameter([2, 2], np.float32, "body_prev_cma")
    body_const_one = ng.parameter([], np.int32, "body_const_one")
    # CMA = cumulative moving average
    # prev_sum = timestep * prev_cma; curr_sum = prev_sum + current slice;
    # curr_cma = curr_sum / (timestep + 1).
    prev_cum_sum = ng.multiply(ng.convert(body_timestep, "f32"), body_prev_cma)
    curr_cum_sum = ng.add(prev_cum_sum, ng.squeeze(body_data_in, [0]))
    elem_cnt = ng.add(body_const_one, body_timestep)
    curr_cma = ng.divide(curr_cum_sum, ng.convert(elem_cnt, "f32"))
    cma_hist = ng.unsqueeze(curr_cma, [0])
    # TI inputs
    data = ng.parameter([16, 2, 2], np.float32, "data")
    # Iterations count
    zero = ng.constant(0, dtype=np.int32)
    one = ng.constant(1, dtype=np.int32)
    initial_cma = ng.constant(np.zeros([2, 2], dtype=np.float32), dtype=np.float32)
    iter_cnt = ng.range(zero, np.int32(16), np.int32(1))
    ti_inputs = [iter_cnt, data, initial_cma, one]
    graph_body = GraphBody([body_timestep, body_data_in, body_prev_cma, body_const_one], [curr_cma, cma_hist])
    ti_slice_input_desc = [
        # timestep
        # input_idx, body_param_idx, start, stride, part_size, end, axis
        TensorIteratorSliceInputDesc(0, 0, 0, 1, 1, -1, 0),
        # data
        TensorIteratorSliceInputDesc(1, 1, 0, 1, 1, -1, 0),
    ]
    ti_merged_input_desc = [
        # body prev/curr_cma
        TensorIteratorMergedInputDesc(2, 2, 0),
    ]
    ti_invariant_input_desc = [
        # body const one
        TensorIteratorInvariantInputDesc(3, 3),
    ]
    # TI outputs
    ti_body_output_desc = [
        # final average
        TensorIteratorBodyOutputDesc(0, 0, -1),
    ]
    ti_concat_output_desc = [
        # history of cma
        TensorIteratorConcatOutputDesc(1, 1, 0, 1, 1, -1, 0),
    ]
    node = ng.tensor_iterator(
        ti_inputs,
        graph_body,
        ti_slice_input_desc,
        ti_merged_input_desc,
        ti_invariant_input_desc,
        ti_body_output_desc,
        ti_concat_output_desc,
    )
    assert node.get_type_name() == "TensorIterator"
    assert node.get_output_size() == 2
    # final average
    assert list(node.get_output_shape(0)) == [2, 2]
    # cma history
    assert list(node.get_output_shape(1)) == [16, 2, 2]
def test_read_value():
    """ReadValue mirrors the shape and element type of its initial value."""
    initial = ng.parameter([2, 2], name="init_value", dtype=np.int32)
    rv_node = ng.read_value(initial, "var_id_667")
    assert rv_node.get_type_name() == "ReadValue"
    assert rv_node.get_output_size() == 1
    assert list(rv_node.get_output_shape(0)) == [2, 2]
    assert rv_node.get_output_element_type(0) == Type.i32
def test_assign():
    """Assign on a ReadValue passes shape and element type through unchanged."""
    source = ng.parameter([5, 7], name="input_data", dtype=np.int32)
    variable = ng.read_value(source, "var_id_667")
    assign_node = ng.assign(variable, "var_id_667")
    assert assign_node.get_type_name() == "Assign"
    assert assign_node.get_output_size() == 1
    assert list(assign_node.get_output_shape(0)) == [5, 7]
    assert assign_node.get_output_element_type(0) == Type.i32
def test_extract_image_patches():
    """ExtractImagePatches collects 3x3 patches with stride 5 and VALID padding."""
    image = ng.parameter([64, 3, 10, 10], name="image", dtype=np.int32)
    patch_node = ng.extract_image_patches(image, [3, 3], [5, 5], [1, 1], "VALID")
    assert patch_node.get_type_name() == "ExtractImagePatches"
    assert patch_node.get_output_size() == 1
    # 3 channels * 3 * 3 = 27 values per patch over a 2x2 patch grid.
    assert list(patch_node.get_output_shape(0)) == [64, 27, 2, 2]
    assert patch_node.get_output_element_type(0) == Type.i32
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_lstm_sequence_operator_bidirectional(dtype):
    """Current-opset LSTMSequence, BIDIRECTIONAL: default and parameterized
    constructions both yield a 3-output LSTMSequence node."""
    batch_size = 1
    input_size = 16
    hidden_size = 128
    # Bidirectional: one weight set per direction.
    num_directions = 2
    seq_length = 2
    X_shape = [batch_size, seq_length, input_size]
    H_t_shape = [batch_size, num_directions, hidden_size]
    C_t_shape = [batch_size, num_directions, hidden_size]
    seq_len_shape = [batch_size]
    # Factor of 4 for the stacked LSTM gate weights.
    W_shape = [num_directions, 4 * hidden_size, input_size]
    R_shape = [num_directions, 4 * hidden_size, hidden_size]
    B_shape = [num_directions, 4 * hidden_size]
    parameter_X = ng.parameter(X_shape, name="X", dtype=dtype)
    parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype)
    parameter_C_t = ng.parameter(C_t_shape, name="C_t", dtype=dtype)
    parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32)
    parameter_W = ng.parameter(W_shape, name="W", dtype=dtype)
    parameter_R = ng.parameter(R_shape, name="R", dtype=dtype)
    parameter_B = ng.parameter(B_shape, name="B", dtype=dtype)
    direction = "BIDIRECTIONAL"
    node = ng.lstm_sequence(
        parameter_X,
        parameter_H_t,
        parameter_C_t,
        parameter_seq_len,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
    )
    assert node.get_type_name() == "LSTMSequence"
    assert node.get_output_size() == 3
    # Same construction with explicit activations, alpha/beta and clip.
    activations = ["RELU", "tanh", "Sigmoid"]
    activation_alpha = [1.0, 2.0, 3.0]
    activation_beta = [3.0, 2.0, 1.0]
    clip = 1.22
    node_param = ng.lstm_sequence(
        parameter_X,
        parameter_H_t,
        parameter_C_t,
        parameter_seq_len,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
        activations,
        activation_alpha,
        activation_beta,
        clip,
    )
    assert node_param.get_type_name() == "LSTMSequence"
    assert node_param.get_output_size() == 3
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_lstm_sequence_operator_reverse(dtype):
    """Current-opset LSTMSequence, REVERSE direction: default and
    parameterized constructions both yield a 3-output LSTMSequence node."""
    batch_size = 2
    input_size = 4
    hidden_size = 3
    num_directions = 1
    seq_length = 2
    X_shape = [batch_size, seq_length, input_size]
    H_t_shape = [batch_size, num_directions, hidden_size]
    C_t_shape = [batch_size, num_directions, hidden_size]
    seq_len_shape = [batch_size]
    # Factor of 4 for the stacked LSTM gate weights.
    W_shape = [num_directions, 4 * hidden_size, input_size]
    R_shape = [num_directions, 4 * hidden_size, hidden_size]
    B_shape = [num_directions, 4 * hidden_size]
    parameter_X = ng.parameter(X_shape, name="X", dtype=dtype)
    parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype)
    parameter_C_t = ng.parameter(C_t_shape, name="C_t", dtype=dtype)
    parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32)
    parameter_W = ng.parameter(W_shape, name="W", dtype=dtype)
    parameter_R = ng.parameter(R_shape, name="R", dtype=dtype)
    parameter_B = ng.parameter(B_shape, name="B", dtype=dtype)
    direction = "REVERSE"
    node_default = ng.lstm_sequence(
        parameter_X,
        parameter_H_t,
        parameter_C_t,
        parameter_seq_len,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
    )
    assert node_default.get_type_name() == "LSTMSequence"
    assert node_default.get_output_size() == 3
    # Same construction with explicit activations, alpha/beta and clip.
    activations = ["RELU", "tanh", "Sigmoid"]
    activation_alpha = [1.0, 2.0, 3.0]
    activation_beta = [3.0, 2.0, 1.0]
    clip = 1.22
    node_param = ng.lstm_sequence(
        parameter_X,
        parameter_H_t,
        parameter_C_t,
        parameter_seq_len,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
        activations,
        activation_alpha,
        activation_beta,
        clip,
    )
    assert node_param.get_type_name() == "LSTMSequence"
    assert node_param.get_output_size() == 3
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_lstm_sequence_operator_forward(dtype):
    """Current-opset LSTMSequence, forward direction (lowercase string —
    presumably direction strings are case-insensitive; siblings use uppercase)."""
    batch_size = 2
    input_size = 4
    hidden_size = 3
    num_directions = 1
    seq_length = 2
    X_shape = [batch_size, seq_length, input_size]
    H_t_shape = [batch_size, num_directions, hidden_size]
    C_t_shape = [batch_size, num_directions, hidden_size]
    seq_len_shape = [batch_size]
    # Factor of 4 for the stacked LSTM gate weights.
    W_shape = [num_directions, 4 * hidden_size, input_size]
    R_shape = [num_directions, 4 * hidden_size, hidden_size]
    B_shape = [num_directions, 4 * hidden_size]
    parameter_X = ng.parameter(X_shape, name="X", dtype=dtype)
    parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype)
    parameter_C_t = ng.parameter(C_t_shape, name="C_t", dtype=dtype)
    parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32)
    parameter_W = ng.parameter(W_shape, name="W", dtype=dtype)
    parameter_R = ng.parameter(R_shape, name="R", dtype=dtype)
    parameter_B = ng.parameter(B_shape, name="B", dtype=dtype)
    direction = "forward"
    node_default = ng.lstm_sequence(
        parameter_X,
        parameter_H_t,
        parameter_C_t,
        parameter_seq_len,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
    )
    assert node_default.get_type_name() == "LSTMSequence"
    assert node_default.get_output_size() == 3
    # Same construction with explicit activations, alpha/beta and clip.
    activations = ["RELU", "tanh", "Sigmoid"]
    activation_alpha = [2.0]
    activation_beta = [1.0]
    clip = 0.5
    node = ng.lstm_sequence(
        parameter_X,
        parameter_H_t,
        parameter_C_t,
        parameter_seq_len,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
        activations,
        activation_alpha,
        activation_beta,
        clip,
    )
    assert node.get_type_name() == "LSTMSequence"
    assert node.get_output_size() == 3
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_gru_sequence_operator_bidirectional(dtype):
    """Current-opset GRUSequence, BIDIRECTIONAL: default and parameterized
    constructions both yield a 2-output GRUSequence node."""
    batch_size = 1
    input_size = 16
    hidden_size = 128
    # Bidirectional: one weight set per direction.
    num_directions = 2
    seq_length = 2
    X_shape = [batch_size, seq_length, input_size]
    H_t_shape = [batch_size, num_directions, hidden_size]
    seq_len_shape = [batch_size]
    # GRU stacks weights for 3 gates, hence the factor of 3.
    W_shape = [num_directions, 3 * hidden_size, input_size]
    R_shape = [num_directions, 3 * hidden_size, hidden_size]
    B_shape = [num_directions, 3 * hidden_size]
    parameter_X = ng.parameter(X_shape, name="X", dtype=dtype)
    parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype)
    parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32)
    parameter_W = ng.parameter(W_shape, name="W", dtype=dtype)
    parameter_R = ng.parameter(R_shape, name="R", dtype=dtype)
    parameter_B = ng.parameter(B_shape, name="B", dtype=dtype)
    direction = "BIDIRECTIONAL"
    node = ng.gru_sequence(
        parameter_X,
        parameter_H_t,
        parameter_seq_len,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
    )
    assert node.get_type_name() == "GRUSequence"
    assert node.get_output_size() == 2
    activations = ["RELU", "tanh"]
    activation_alpha = [1.0, 2.0, 3.0]
    activation_beta = [3.0, 2.0, 1.0]
    clip = 1.22
    linear_before_reset = True
    # linear_before_reset=True requires B of shape [num_directions, 4 * hidden_size].
    B_shape = [num_directions, 4 * hidden_size]
    parameter_B = ng.parameter(B_shape, name="B", dtype=dtype)
    node_param = ng.gru_sequence(
        parameter_X,
        parameter_H_t,
        parameter_seq_len,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
        activations,
        activation_alpha,
        activation_beta,
        clip,
        linear_before_reset
    )
    assert node_param.get_type_name() == "GRUSequence"
    assert node_param.get_output_size() == 2
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_gru_sequence_operator_reverse(dtype):
    """Current-opset GRUSequence, REVERSE direction: default and parameterized
    constructions both yield a 2-output GRUSequence node."""
    batch_size = 2
    input_size = 4
    hidden_size = 3
    num_directions = 1
    seq_length = 2
    X_shape = [batch_size, seq_length, input_size]
    H_t_shape = [batch_size, num_directions, hidden_size]
    seq_len_shape = [batch_size]
    # GRU stacks weights for 3 gates, hence the factor of 3.
    W_shape = [num_directions, 3 * hidden_size, input_size]
    R_shape = [num_directions, 3 * hidden_size, hidden_size]
    B_shape = [num_directions, 3 * hidden_size]
    parameter_X = ng.parameter(X_shape, name="X", dtype=dtype)
    parameter_H_t = ng.parameter(H_t_shape, name="H_t", dtype=dtype)
    parameter_seq_len = ng.parameter(seq_len_shape, name="seq_len", dtype=np.int32)
    parameter_W = ng.parameter(W_shape, name="W", dtype=dtype)
    parameter_R = ng.parameter(R_shape, name="R", dtype=dtype)
    parameter_B = ng.parameter(B_shape, name="B", dtype=dtype)
    direction = "REVERSE"
    node_default = ng.gru_sequence(
        parameter_X,
        parameter_H_t,
        parameter_seq_len,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
    )
    assert node_default.get_type_name() == "GRUSequence"
    assert node_default.get_output_size() == 2
    activations = ["RELU", "tanh"]
    activation_alpha = [1.0, 2.0, 3.0]
    activation_beta = [3.0, 2.0, 1.0]
    clip = 1.22
    linear_before_reset = True
    # linear_before_reset=True requires B of shape [num_directions, 4 * hidden_size].
    B_shape = [num_directions, 4 * hidden_size]
    parameter_B = ng.parameter(B_shape, name="B", dtype=dtype)
    node_param = ng.gru_sequence(
        parameter_X,
        parameter_H_t,
        parameter_seq_len,
        parameter_W,
        parameter_R,
        parameter_B,
        hidden_size,
        direction,
        activations,
        activation_alpha,
        activation_beta,
        clip,
        linear_before_reset
    )
    assert node_param.get_type_name() == "GRUSequence"
    assert node_param.get_output_size() == 2
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_gru_sequence_operator_forward(dtype):
    """Build a forward-direction GRUSequence node with default and with
    explicit optional attributes; verify type name and number of outputs."""
    batch_size, seq_length = 2, 2
    input_size, hidden_size = 4, 3
    num_directions = 1

    # Parameter nodes for the mandatory GRUSequence inputs.
    p_x = ng.parameter([batch_size, seq_length, input_size], name="X", dtype=dtype)
    p_h = ng.parameter([batch_size, num_directions, hidden_size], name="H_t", dtype=dtype)
    p_seq = ng.parameter([batch_size], name="seq_len", dtype=np.int32)
    p_w = ng.parameter([num_directions, 3 * hidden_size, input_size], name="W", dtype=dtype)
    p_r = ng.parameter([num_directions, 3 * hidden_size, hidden_size], name="R", dtype=dtype)
    p_b = ng.parameter([num_directions, 3 * hidden_size], name="B", dtype=dtype)

    direction = "forward"

    # Variant 1: mandatory inputs only.
    node_default = ng.gru_sequence(p_x, p_h, p_seq, p_w, p_r, p_b, hidden_size, direction)
    assert node_default.get_type_name() == "GRUSequence"
    assert node_default.get_output_size() == 2

    # Variant 2: activations, alpha/beta, clip and linear_before_reset.
    # NOTE: B is recreated with 4 * hidden_size here — presumably the extra
    # bias slot used when linear_before_reset is enabled.
    p_b = ng.parameter([num_directions, 4 * hidden_size], name="B", dtype=dtype)
    node = ng.gru_sequence(
        p_x,
        p_h,
        p_seq,
        p_w,
        p_r,
        p_b,
        hidden_size,
        direction,
        ["RELU", "tanh"],
        [2.0],
        [1.0],
        0.5,
        True,
    )
    assert node.get_type_name() == "GRUSequence"
    assert node.get_output_size() == 2
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_rnn_sequence_operator_bidirectional(dtype):
    """Build a BIDIRECTIONAL RNNSequence node with default and with
    explicit optional attributes; verify type name and number of outputs."""
    batch_size, seq_length = 1, 2
    input_size, hidden_size = 16, 128
    num_directions = 2

    # Parameter nodes for the mandatory RNNSequence inputs.
    p_x = ng.parameter([batch_size, seq_length, input_size], name="X", dtype=dtype)
    p_h = ng.parameter([batch_size, num_directions, hidden_size], name="H_t", dtype=dtype)
    p_seq = ng.parameter([batch_size], name="seq_len", dtype=np.int32)
    p_w = ng.parameter([num_directions, hidden_size, input_size], name="W", dtype=dtype)
    p_r = ng.parameter([num_directions, hidden_size, hidden_size], name="R", dtype=dtype)
    p_b = ng.parameter([num_directions, hidden_size], name="B", dtype=dtype)

    direction = "BIDIRECTIONAL"

    # Variant 1: mandatory inputs only.
    node = ng.rnn_sequence(p_x, p_h, p_seq, p_w, p_r, p_b, hidden_size, direction)
    assert node.get_type_name() == "RNNSequence"
    assert node.get_output_size() == 2

    # Variant 2: explicit activations, alpha/beta and clip.
    node_param = ng.rnn_sequence(
        p_x,
        p_h,
        p_seq,
        p_w,
        p_r,
        p_b,
        hidden_size,
        direction,
        ["RELU", "tanh"],
        [1.0, 2.0, 3.0],
        [3.0, 2.0, 1.0],
        1.22,
    )
    assert node_param.get_type_name() == "RNNSequence"
    assert node_param.get_output_size() == 2
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_rnn_sequence_operator_reverse(dtype):
    """Build a REVERSE RNNSequence node with default and with explicit
    optional attributes; verify type name and number of outputs."""
    batch_size, seq_length = 2, 2
    input_size, hidden_size = 4, 3
    num_directions = 1

    # Parameter nodes for the mandatory RNNSequence inputs.
    p_x = ng.parameter([batch_size, seq_length, input_size], name="X", dtype=dtype)
    p_h = ng.parameter([batch_size, num_directions, hidden_size], name="H_t", dtype=dtype)
    p_seq = ng.parameter([batch_size], name="seq_len", dtype=np.int32)
    p_w = ng.parameter([num_directions, hidden_size, input_size], name="W", dtype=dtype)
    p_r = ng.parameter([num_directions, hidden_size, hidden_size], name="R", dtype=dtype)
    p_b = ng.parameter([num_directions, hidden_size], name="B", dtype=dtype)

    direction = "REVERSE"

    # Variant 1: mandatory inputs only.
    node_default = ng.rnn_sequence(p_x, p_h, p_seq, p_w, p_r, p_b, hidden_size, direction)
    assert node_default.get_type_name() == "RNNSequence"
    assert node_default.get_output_size() == 2

    # Variant 2: explicit activations, alpha/beta and clip.
    node_param = ng.rnn_sequence(
        p_x,
        p_h,
        p_seq,
        p_w,
        p_r,
        p_b,
        hidden_size,
        direction,
        ["RELU", "tanh"],
        [1.0, 2.0, 3.0],
        [3.0, 2.0, 1.0],
        1.22,
    )
    assert node_param.get_type_name() == "RNNSequence"
    assert node_param.get_output_size() == 2
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_rnn_sequence_operator_forward(dtype):
    """Build a forward-direction RNNSequence node with default and with
    explicit optional attributes; verify type name and number of outputs."""
    batch_size, seq_length = 2, 2
    input_size, hidden_size = 4, 3
    num_directions = 1

    # Parameter nodes for the mandatory RNNSequence inputs.
    p_x = ng.parameter([batch_size, seq_length, input_size], name="X", dtype=dtype)
    p_h = ng.parameter([batch_size, num_directions, hidden_size], name="H_t", dtype=dtype)
    p_seq = ng.parameter([batch_size], name="seq_len", dtype=np.int32)
    p_w = ng.parameter([num_directions, hidden_size, input_size], name="W", dtype=dtype)
    p_r = ng.parameter([num_directions, hidden_size, hidden_size], name="R", dtype=dtype)
    p_b = ng.parameter([num_directions, hidden_size], name="B", dtype=dtype)

    direction = "forward"

    # Variant 1: mandatory inputs only.
    node_default = ng.rnn_sequence(p_x, p_h, p_seq, p_w, p_r, p_b, hidden_size, direction)
    assert node_default.get_type_name() == "RNNSequence"
    assert node_default.get_output_size() == 2

    # Variant 2: explicit activations, alpha/beta and clip.
    node = ng.rnn_sequence(
        p_x,
        p_h,
        p_seq,
        p_w,
        p_r,
        p_b,
        hidden_size,
        direction,
        ["RELU", "tanh"],
        [2.0],
        [1.0],
        0.5,
    )
    assert node.get_type_name() == "RNNSequence"
    assert node.get_output_size() == 2
| [
"ngraph.add",
"ngraph.floor_mod",
"ngraph.utils.tensor_iterator_types.TensorIteratorInvariantInputDesc",
"ngraph.bucketize",
"ngraph.lstm_sequence",
"ngraph.proposal",
"ngraph.psroi_pooling",
"ngraph.assign",
"ngraph.roi_pooling",
"pytest.mark.parametrize",
"ngraph.gru_cell",
"ngraph.unsqueeze... | [((1109, 1151), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', 'np_types'], {}), "('dtype', np_types)\n", (1132, 1151), False, 'import pytest\n'), ((1923, 1965), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', 'np_types'], {}), "('dtype', np_types)\n", (1946, 1965), False, 'import pytest\n'), ((2480, 2522), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', 'np_types'], {}), "('dtype', np_types)\n", (2503, 2522), False, 'import pytest\n'), ((3370, 3412), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', 'np_types'], {}), "('dtype', np_types)\n", (3393, 3412), False, 'import pytest\n'), ((4466, 4508), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', 'np_types'], {}), "('dtype', np_types)\n", (4489, 4508), False, 'import pytest\n'), ((4998, 5040), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', 'np_types'], {}), "('dtype', np_types)\n", (5021, 5040), False, 'import pytest\n'), ((5776, 5834), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.float32, np.float64]'], {}), "('dtype', [np.float32, np.float64])\n", (5799, 5834), False, 'import pytest\n'), ((7627, 7685), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.float32, np.float64]'], {}), "('dtype', [np.float32, np.float64])\n", (7650, 7685), False, 'import pytest\n'), ((9499, 9557), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.float32, np.float64]'], {}), "('dtype', [np.float32, np.float64])\n", (9522, 9557), False, 'import pytest\n'), ((11506, 11564), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.float32, np.float64]'], {}), "('dtype', [np.float32, np.float64])\n", (11529, 11564), False, 'import pytest\n'), ((13523, 13581), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.float32, np.float64]'], {}), "('dtype', 
[np.float32, np.float64])\n", (13546, 13581), False, 'import pytest\n'), ((27088, 27139), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', 'integral_np_types'], {}), "('dtype', integral_np_types)\n", (27111, 27139), False, 'import pytest\n'), ((27679, 27983), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""int_dtype, fp_dtype"""', '[(np.int8, np.float32), (np.int16, np.float32), (np.int32, np.float32), (np\n .int64, np.float32), (np.uint8, np.float32), (np.uint16, np.float32), (\n np.uint32, np.float32), (np.uint64, np.float32), (np.int32, np.float16),\n (np.int32, np.float64)]'], {}), "('int_dtype, fp_dtype', [(np.int8, np.float32), (np.\n int16, np.float32), (np.int32, np.float32), (np.int64, np.float32), (np\n .uint8, np.float32), (np.uint16, np.float32), (np.uint32, np.float32),\n (np.uint64, np.float32), (np.int32, np.float16), (np.int32, np.float64)])\n", (27702, 27983), False, 'import pytest\n'), ((28660, 28964), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""int_dtype, fp_dtype"""', '[(np.int8, np.float32), (np.int16, np.float32), (np.int32, np.float32), (np\n .int64, np.float32), (np.uint8, np.float32), (np.uint16, np.float32), (\n np.uint32, np.float32), (np.uint64, np.float32), (np.int32, np.float16),\n (np.int32, np.float64)]'], {}), "('int_dtype, fp_dtype', [(np.int8, np.float32), (np.\n int16, np.float32), (np.int32, np.float32), (np.int64, np.float32), (np\n .uint8, np.float32), (np.uint16, np.float32), (np.uint32, np.float32),\n (np.uint64, np.float32), (np.int32, np.float16), (np.int32, np.float64)])\n", (28683, 28964), False, 'import pytest\n'), ((29636, 29940), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""int_dtype, fp_dtype"""', '[(np.int8, np.float32), (np.int16, np.float32), (np.int32, np.float32), (np\n .int64, np.float32), (np.uint8, np.float32), (np.uint16, np.float32), (\n np.uint32, np.float32), (np.uint64, np.float32), (np.int32, np.float16),\n (np.int32, np.float64)]'], {}), 
"('int_dtype, fp_dtype', [(np.int8, np.float32), (np.\n int16, np.float32), (np.int32, np.float32), (np.int64, np.float32), (np\n .uint8, np.float32), (np.uint16, np.float32), (np.uint32, np.float32),\n (np.uint64, np.float32), (np.int32, np.float16), (np.int32, np.float64)])\n", (29659, 29940), False, 'import pytest\n'), ((30858, 31064), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""int_dtype, fp_dtype"""', '[(np.uint8, np.float32), (np.uint16, np.float32), (np.uint32, np.float32),\n (np.uint64, np.float32), (np.uint32, np.float16), (np.uint32, np.float64)]'], {}), "('int_dtype, fp_dtype', [(np.uint8, np.float32), (np\n .uint16, np.float32), (np.uint32, np.float32), (np.uint64, np.float32),\n (np.uint32, np.float16), (np.uint32, np.float64)])\n", (30881, 31064), False, 'import pytest\n'), ((35851, 35909), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.float32, np.float64]'], {}), "('dtype', [np.float32, np.float64])\n", (35874, 35909), False, 'import pytest\n'), ((37837, 37895), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.float32, np.float64]'], {}), "('dtype', [np.float32, np.float64])\n", (37860, 37895), False, 'import pytest\n'), ((39833, 39891), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.float32, np.float64]'], {}), "('dtype', [np.float32, np.float64])\n", (39856, 39891), False, 'import pytest\n'), ((41790, 41848), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.float32, np.float64]'], {}), "('dtype', [np.float32, np.float64])\n", (41813, 41848), False, 'import pytest\n'), ((43757, 43815), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.float32, np.float64]'], {}), "('dtype', [np.float32, np.float64])\n", (43780, 43815), False, 'import pytest\n'), ((45734, 45792), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.float32, np.float64]'], {}), "('dtype', [np.float32, 
np.float64])\n", (45757, 45792), False, 'import pytest\n'), ((47672, 47730), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.float32, np.float64]'], {}), "('dtype', [np.float32, np.float64])\n", (47695, 47730), False, 'import pytest\n'), ((49457, 49515), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.float32, np.float64]'], {}), "('dtype', [np.float32, np.float64])\n", (49480, 49515), False, 'import pytest\n'), ((51252, 51310), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[np.float32, np.float64]'], {}), "('dtype', [np.float32, np.float64])\n", (51275, 51310), False, 'import pytest\n'), ((1202, 1218), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (1210, 1218), True, 'import numpy as np\n'), ((1236, 1252), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (1244, 1252), True, 'import numpy as np\n'), ((1268, 1284), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (1276, 1284), True, 'import numpy as np\n'), ((1301, 1317), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (1309, 1317), True, 'import numpy as np\n'), ((1488, 1542), 'ngraph.parameter', 'ng.parameter', (['input0_shape'], {'name': '"""Input0"""', 'dtype': 'dtype'}), "(input0_shape, name='Input0', dtype=dtype)\n", (1500, 1542), True, 'import ngraph as ng\n'), ((1566, 1620), 'ngraph.parameter', 'ng.parameter', (['input1_shape'], {'name': '"""Input1"""', 'dtype': 'dtype'}), "(input1_shape, name='Input1', dtype=dtype)\n", (1578, 1620), True, 'import ngraph as ng\n'), ((1633, 1753), 'ngraph.binary_convolution', 'ng.binary_convolution', (['parameter_input0', 'parameter_input1', 'strides', 'pads_begin', 'pads_end', 'dilations', 'mode', 'pad_value'], {}), '(parameter_input0, parameter_input1, strides,\n pads_begin, pads_end, dilations, mode, pad_value)\n', (1654, 1753), True, 'import ngraph as ng\n'), ((2120, 2174), 'ngraph.parameter', 'ng.parameter', (['input0_shape'], {'name': 
'"""Input0"""', 'dtype': 'dtype'}), "(input0_shape, name='Input0', dtype=dtype)\n", (2132, 2174), True, 'import ngraph as ng\n'), ((2198, 2252), 'ngraph.parameter', 'ng.parameter', (['input1_shape'], {'name': '"""Input1"""', 'dtype': 'dtype'}), "(input1_shape, name='Input1', dtype=dtype)\n", (2210, 2252), True, 'import ngraph as ng\n'), ((2265, 2322), 'ngraph.ctc_greedy_decoder', 'ng.ctc_greedy_decoder', (['parameter_input0', 'parameter_input1'], {}), '(parameter_input0, parameter_input1)\n', (2286, 2322), True, 'import ngraph as ng\n'), ((2577, 2593), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (2585, 2593), True, 'import numpy as np\n'), ((2611, 2627), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (2619, 2627), True, 'import numpy as np\n'), ((2643, 2659), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (2651, 2659), True, 'import numpy as np\n'), ((2676, 2692), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (2684, 2692), True, 'import numpy as np\n'), ((2848, 2902), 'ngraph.parameter', 'ng.parameter', (['input0_shape'], {'name': '"""Input0"""', 'dtype': 'dtype'}), "(input0_shape, name='Input0', dtype=dtype)\n", (2860, 2902), True, 'import ngraph as ng\n'), ((2926, 2980), 'ngraph.parameter', 'ng.parameter', (['input1_shape'], {'name': '"""Input1"""', 'dtype': 'dtype'}), "(input1_shape, name='Input1', dtype=dtype)\n", (2938, 2980), True, 'import ngraph as ng\n'), ((3004, 3058), 'ngraph.parameter', 'ng.parameter', (['input2_shape'], {'name': '"""Input2"""', 'dtype': 'dtype'}), "(input2_shape, name='Input2', dtype=dtype)\n", (3016, 3058), True, 'import ngraph as ng\n'), ((3071, 3196), 'ngraph.deformable_convolution', 'ng.deformable_convolution', (['parameter_input0', 'parameter_input1', 'parameter_input2', 'strides', 'pads_begin', 'pads_end', 'dilations'], {}), '(parameter_input0, parameter_input1,\n parameter_input2, strides, pads_begin, pads_end, dilations)\n', (3096, 3196), True, 'import ngraph as ng\n'), ((3796, 
3850), 'ngraph.parameter', 'ng.parameter', (['input0_shape'], {'name': '"""Input0"""', 'dtype': 'dtype'}), "(input0_shape, name='Input0', dtype=dtype)\n", (3808, 3850), True, 'import ngraph as ng\n'), ((3874, 3928), 'ngraph.parameter', 'ng.parameter', (['input1_shape'], {'name': '"""Input1"""', 'dtype': 'dtype'}), "(input1_shape, name='Input1', dtype=dtype)\n", (3886, 3928), True, 'import ngraph as ng\n'), ((3952, 4006), 'ngraph.parameter', 'ng.parameter', (['input2_shape'], {'name': '"""Input2"""', 'dtype': 'dtype'}), "(input2_shape, name='Input2', dtype=dtype)\n", (3964, 4006), True, 'import ngraph as ng\n'), ((4019, 4215), 'ngraph.deformable_psroi_pooling', 'ng.deformable_psroi_pooling', (['parameter_input0', 'parameter_input1', 'output_dim', 'spatial_scale', 'group_size', 'mode', 'spatial_bins_x', 'spatial_bins_y', 'trans_std', 'part_size'], {'offsets': 'parameter_input2'}), '(parameter_input0, parameter_input1, output_dim,\n spatial_scale, group_size, mode, spatial_bins_x, spatial_bins_y,\n trans_std, part_size, offsets=parameter_input2)\n', (4046, 4215), True, 'import ngraph as ng\n'), ((4655, 4709), 'ngraph.parameter', 'ng.parameter', (['input0_shape'], {'name': '"""Input0"""', 'dtype': 'dtype'}), "(input0_shape, name='Input0', dtype=dtype)\n", (4667, 4709), True, 'import ngraph as ng\n'), ((4733, 4787), 'ngraph.parameter', 'ng.parameter', (['input1_shape'], {'name': '"""Input1"""', 'dtype': 'dtype'}), "(input1_shape, name='Input1', dtype=dtype)\n", (4745, 4787), True, 'import ngraph as ng\n'), ((4800, 4848), 'ngraph.floor_mod', 'ng.floor_mod', (['parameter_input0', 'parameter_input1'], {}), '(parameter_input0, parameter_input1)\n', (4812, 4848), True, 'import ngraph as ng\n'), ((5237, 5291), 'ngraph.parameter', 'ng.parameter', (['input0_shape'], {'name': '"""Input0"""', 'dtype': 'dtype'}), "(input0_shape, name='Input0', dtype=dtype)\n", (5249, 5291), True, 'import ngraph as ng\n'), ((5315, 5369), 'ngraph.parameter', 'ng.parameter', (['input1_shape'], 
{'name': '"""Input1"""', 'dtype': 'dtype'}), "(input1_shape, name='Input1', dtype=dtype)\n", (5327, 5369), True, 'import ngraph as ng\n'), ((5393, 5447), 'ngraph.parameter', 'ng.parameter', (['input2_shape'], {'name': '"""Input2"""', 'dtype': 'dtype'}), "(input2_shape, name='Input2', dtype=dtype)\n", (5405, 5447), True, 'import ngraph as ng\n'), ((5471, 5525), 'ngraph.parameter', 'ng.parameter', (['input3_shape'], {'name': '"""Input3"""', 'dtype': 'dtype'}), "(input3_shape, name='Input3', dtype=dtype)\n", (5483, 5525), True, 'import ngraph as ng\n'), ((5538, 5628), 'ngraph.gather_tree', 'ng.gather_tree', (['parameter_input0', 'parameter_input1', 'parameter_input2', 'parameter_input3'], {}), '(parameter_input0, parameter_input1, parameter_input2,\n parameter_input3)\n', (5552, 5628), True, 'import ngraph as ng\n'), ((6196, 6240), 'ngraph.parameter', 'ng.parameter', (['X_shape'], {'name': '"""X"""', 'dtype': 'dtype'}), "(X_shape, name='X', dtype=dtype)\n", (6208, 6240), True, 'import ngraph as ng\n'), ((6261, 6309), 'ngraph.parameter', 'ng.parameter', (['H_t_shape'], {'name': '"""H_t"""', 'dtype': 'dtype'}), "(H_t_shape, name='H_t', dtype=dtype)\n", (6273, 6309), True, 'import ngraph as ng\n'), ((6330, 6378), 'ngraph.parameter', 'ng.parameter', (['C_t_shape'], {'name': '"""C_t"""', 'dtype': 'dtype'}), "(C_t_shape, name='C_t', dtype=dtype)\n", (6342, 6378), True, 'import ngraph as ng\n'), ((6397, 6441), 'ngraph.parameter', 'ng.parameter', (['W_shape'], {'name': '"""W"""', 'dtype': 'dtype'}), "(W_shape, name='W', dtype=dtype)\n", (6409, 6441), True, 'import ngraph as ng\n'), ((6460, 6504), 'ngraph.parameter', 'ng.parameter', (['R_shape'], {'name': '"""R"""', 'dtype': 'dtype'}), "(R_shape, name='R', dtype=dtype)\n", (6472, 6504), True, 'import ngraph as ng\n'), ((6523, 6567), 'ngraph.parameter', 'ng.parameter', (['B_shape'], {'name': '"""B"""', 'dtype': 'dtype'}), "(B_shape, name='B', dtype=dtype)\n", (6535, 6567), True, 'import ngraph as ng\n'), ((6619, 6730), 
'ngraph.lstm_cell', 'ng.lstm_cell', (['parameter_X', 'parameter_H_t', 'parameter_C_t', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size'], {}), '(parameter_X, parameter_H_t, parameter_C_t, parameter_W,\n parameter_R, parameter_B, hidden_size)\n', (6631, 6730), True, 'import ngraph as ng\n'), ((7137, 7306), 'ngraph.lstm_cell', 'ng.lstm_cell', (['parameter_X', 'parameter_H_t', 'parameter_C_t', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'activations', 'activation_alpha', 'activation_beta', 'clip'], {}), '(parameter_X, parameter_H_t, parameter_C_t, parameter_W,\n parameter_R, parameter_B, hidden_size, activations, activation_alpha,\n activation_beta, clip)\n', (7149, 7306), True, 'import ngraph as ng\n'), ((8054, 8098), 'ngraph.parameter', 'ng.parameter', (['X_shape'], {'name': '"""X"""', 'dtype': 'dtype'}), "(X_shape, name='X', dtype=dtype)\n", (8066, 8098), True, 'import ngraph as ng\n'), ((8119, 8167), 'ngraph.parameter', 'ng.parameter', (['H_t_shape'], {'name': '"""H_t"""', 'dtype': 'dtype'}), "(H_t_shape, name='H_t', dtype=dtype)\n", (8131, 8167), True, 'import ngraph as ng\n'), ((8188, 8236), 'ngraph.parameter', 'ng.parameter', (['C_t_shape'], {'name': '"""C_t"""', 'dtype': 'dtype'}), "(C_t_shape, name='C_t', dtype=dtype)\n", (8200, 8236), True, 'import ngraph as ng\n'), ((8255, 8299), 'ngraph.parameter', 'ng.parameter', (['W_shape'], {'name': '"""W"""', 'dtype': 'dtype'}), "(W_shape, name='W', dtype=dtype)\n", (8267, 8299), True, 'import ngraph as ng\n'), ((8318, 8362), 'ngraph.parameter', 'ng.parameter', (['R_shape'], {'name': '"""R"""', 'dtype': 'dtype'}), "(R_shape, name='R', dtype=dtype)\n", (8330, 8362), True, 'import ngraph as ng\n'), ((8381, 8425), 'ngraph.parameter', 'ng.parameter', (['B_shape'], {'name': '"""B"""', 'dtype': 'dtype'}), "(B_shape, name='B', dtype=dtype)\n", (8393, 8425), True, 'import ngraph as ng\n'), ((8477, 8595), 'ngraph.opset1.lstm_cell', 'ng_opset1.lstm_cell', (['parameter_X', 'parameter_H_t', 
'parameter_C_t', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size'], {}), '(parameter_X, parameter_H_t, parameter_C_t, parameter_W,\n parameter_R, parameter_B, hidden_size)\n', (8496, 8595), True, 'import ngraph.opset1 as ng_opset1\n'), ((9002, 9178), 'ngraph.opset1.lstm_cell', 'ng_opset1.lstm_cell', (['parameter_X', 'parameter_H_t', 'parameter_C_t', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'activations', 'activation_alpha', 'activation_beta', 'clip'], {}), '(parameter_X, parameter_H_t, parameter_C_t, parameter_W,\n parameter_R, parameter_B, hidden_size, activations, activation_alpha,\n activation_beta, clip)\n', (9021, 9178), True, 'import ngraph.opset1 as ng_opset1\n'), ((10111, 10155), 'ngraph.parameter', 'ng.parameter', (['X_shape'], {'name': '"""X"""', 'dtype': 'dtype'}), "(X_shape, name='X', dtype=dtype)\n", (10123, 10155), True, 'import ngraph as ng\n'), ((10176, 10224), 'ngraph.parameter', 'ng.parameter', (['H_t_shape'], {'name': '"""H_t"""', 'dtype': 'dtype'}), "(H_t_shape, name='H_t', dtype=dtype)\n", (10188, 10224), True, 'import ngraph as ng\n'), ((10245, 10293), 'ngraph.parameter', 'ng.parameter', (['C_t_shape'], {'name': '"""C_t"""', 'dtype': 'dtype'}), "(C_t_shape, name='C_t', dtype=dtype)\n", (10257, 10293), True, 'import ngraph as ng\n'), ((10318, 10377), 'ngraph.parameter', 'ng.parameter', (['seq_len_shape'], {'name': '"""seq_len"""', 'dtype': 'np.int32'}), "(seq_len_shape, name='seq_len', dtype=np.int32)\n", (10330, 10377), True, 'import ngraph as ng\n'), ((10396, 10440), 'ngraph.parameter', 'ng.parameter', (['W_shape'], {'name': '"""W"""', 'dtype': 'dtype'}), "(W_shape, name='W', dtype=dtype)\n", (10408, 10440), True, 'import ngraph as ng\n'), ((10459, 10503), 'ngraph.parameter', 'ng.parameter', (['R_shape'], {'name': '"""R"""', 'dtype': 'dtype'}), "(R_shape, name='R', dtype=dtype)\n", (10471, 10503), True, 'import ngraph as ng\n'), ((10522, 10566), 'ngraph.parameter', 'ng.parameter', (['B_shape'], {'name': '"""B"""', 
'dtype': 'dtype'}), "(B_shape, name='B', dtype=dtype)\n", (10534, 10566), True, 'import ngraph as ng\n'), ((10611, 10767), 'ngraph.opset1.lstm_sequence', 'ng_opset1.lstm_sequence', (['parameter_X', 'parameter_H_t', 'parameter_C_t', 'parameter_seq_len', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction'], {}), '(parameter_X, parameter_H_t, parameter_C_t,\n parameter_seq_len, parameter_W, parameter_R, parameter_B, hidden_size,\n direction)\n', (10634, 10767), True, 'import ngraph.opset1 as ng_opset1\n'), ((11087, 11297), 'ngraph.opset1.lstm_sequence', 'ng_opset1.lstm_sequence', (['parameter_X', 'parameter_H_t', 'parameter_C_t', 'parameter_seq_len', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction', 'activations', 'activation_alpha', 'activation_beta', 'clip'], {}), '(parameter_X, parameter_H_t, parameter_C_t,\n parameter_seq_len, parameter_W, parameter_R, parameter_B, hidden_size,\n direction, activations, activation_alpha, activation_beta, clip)\n', (11110, 11297), True, 'import ngraph.opset1 as ng_opset1\n'), ((12109, 12153), 'ngraph.parameter', 'ng.parameter', (['X_shape'], {'name': '"""X"""', 'dtype': 'dtype'}), "(X_shape, name='X', dtype=dtype)\n", (12121, 12153), True, 'import ngraph as ng\n'), ((12174, 12222), 'ngraph.parameter', 'ng.parameter', (['H_t_shape'], {'name': '"""H_t"""', 'dtype': 'dtype'}), "(H_t_shape, name='H_t', dtype=dtype)\n", (12186, 12222), True, 'import ngraph as ng\n'), ((12243, 12291), 'ngraph.parameter', 'ng.parameter', (['C_t_shape'], {'name': '"""C_t"""', 'dtype': 'dtype'}), "(C_t_shape, name='C_t', dtype=dtype)\n", (12255, 12291), True, 'import ngraph as ng\n'), ((12316, 12375), 'ngraph.parameter', 'ng.parameter', (['seq_len_shape'], {'name': '"""seq_len"""', 'dtype': 'np.int32'}), "(seq_len_shape, name='seq_len', dtype=np.int32)\n", (12328, 12375), True, 'import ngraph as ng\n'), ((12394, 12438), 'ngraph.parameter', 'ng.parameter', (['W_shape'], {'name': '"""W"""', 'dtype': 'dtype'}), 
"(W_shape, name='W', dtype=dtype)\n", (12406, 12438), True, 'import ngraph as ng\n'), ((12457, 12501), 'ngraph.parameter', 'ng.parameter', (['R_shape'], {'name': '"""R"""', 'dtype': 'dtype'}), "(R_shape, name='R', dtype=dtype)\n", (12469, 12501), True, 'import ngraph as ng\n'), ((12520, 12564), 'ngraph.parameter', 'ng.parameter', (['B_shape'], {'name': '"""B"""', 'dtype': 'dtype'}), "(B_shape, name='B', dtype=dtype)\n", (12532, 12564), True, 'import ngraph as ng\n'), ((12612, 12768), 'ngraph.opset1.lstm_sequence', 'ng_opset1.lstm_sequence', (['parameter_X', 'parameter_H_t', 'parameter_C_t', 'parameter_seq_len', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction'], {}), '(parameter_X, parameter_H_t, parameter_C_t,\n parameter_seq_len, parameter_W, parameter_R, parameter_B, hidden_size,\n direction)\n', (12635, 12768), True, 'import ngraph.opset1 as ng_opset1\n'), ((13104, 13314), 'ngraph.opset1.lstm_sequence', 'ng_opset1.lstm_sequence', (['parameter_X', 'parameter_H_t', 'parameter_C_t', 'parameter_seq_len', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction', 'activations', 'activation_alpha', 'activation_beta', 'clip'], {}), '(parameter_X, parameter_H_t, parameter_C_t,\n parameter_seq_len, parameter_W, parameter_R, parameter_B, hidden_size,\n direction, activations, activation_alpha, activation_beta, clip)\n', (13127, 13314), True, 'import ngraph.opset1 as ng_opset1\n'), ((14126, 14170), 'ngraph.parameter', 'ng.parameter', (['X_shape'], {'name': '"""X"""', 'dtype': 'dtype'}), "(X_shape, name='X', dtype=dtype)\n", (14138, 14170), True, 'import ngraph as ng\n'), ((14191, 14239), 'ngraph.parameter', 'ng.parameter', (['H_t_shape'], {'name': '"""H_t"""', 'dtype': 'dtype'}), "(H_t_shape, name='H_t', dtype=dtype)\n", (14203, 14239), True, 'import ngraph as ng\n'), ((14260, 14308), 'ngraph.parameter', 'ng.parameter', (['C_t_shape'], {'name': '"""C_t"""', 'dtype': 'dtype'}), "(C_t_shape, name='C_t', dtype=dtype)\n", (14272, 14308), 
True, 'import ngraph as ng\n'), ((14333, 14392), 'ngraph.parameter', 'ng.parameter', (['seq_len_shape'], {'name': '"""seq_len"""', 'dtype': 'np.int32'}), "(seq_len_shape, name='seq_len', dtype=np.int32)\n", (14345, 14392), True, 'import ngraph as ng\n'), ((14411, 14455), 'ngraph.parameter', 'ng.parameter', (['W_shape'], {'name': '"""W"""', 'dtype': 'dtype'}), "(W_shape, name='W', dtype=dtype)\n", (14423, 14455), True, 'import ngraph as ng\n'), ((14474, 14518), 'ngraph.parameter', 'ng.parameter', (['R_shape'], {'name': '"""R"""', 'dtype': 'dtype'}), "(R_shape, name='R', dtype=dtype)\n", (14486, 14518), True, 'import ngraph as ng\n'), ((14537, 14581), 'ngraph.parameter', 'ng.parameter', (['B_shape'], {'name': '"""B"""', 'dtype': 'dtype'}), "(B_shape, name='B', dtype=dtype)\n", (14549, 14581), True, 'import ngraph as ng\n'), ((14629, 14785), 'ngraph.opset1.lstm_sequence', 'ng_opset1.lstm_sequence', (['parameter_X', 'parameter_H_t', 'parameter_C_t', 'parameter_seq_len', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction'], {}), '(parameter_X, parameter_H_t, parameter_C_t,\n parameter_seq_len, parameter_W, parameter_R, parameter_B, hidden_size,\n direction)\n', (14652, 14785), True, 'import ngraph.opset1 as ng_opset1\n'), ((15094, 15304), 'ngraph.opset1.lstm_sequence', 'ng_opset1.lstm_sequence', (['parameter_X', 'parameter_H_t', 'parameter_C_t', 'parameter_seq_len', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction', 'activations', 'activation_alpha', 'activation_beta', 'clip'], {}), '(parameter_X, parameter_H_t, parameter_C_t,\n parameter_seq_len, parameter_W, parameter_R, parameter_B, hidden_size,\n direction, activations, activation_alpha, activation_beta, clip)\n', (15117, 15304), True, 'import ngraph.opset1 as ng_opset1\n'), ((15813, 15862), 'ngraph.parameter', 'ng.parameter', (['X_shape'], {'name': '"""X"""', 'dtype': 'np.float32'}), "(X_shape, name='X', dtype=np.float32)\n", (15825, 15862), True, 'import ngraph as ng\n'), 
((15883, 15936), 'ngraph.parameter', 'ng.parameter', (['H_t_shape'], {'name': '"""H_t"""', 'dtype': 'np.float32'}), "(H_t_shape, name='H_t', dtype=np.float32)\n", (15895, 15936), True, 'import ngraph as ng\n'), ((15955, 16004), 'ngraph.parameter', 'ng.parameter', (['W_shape'], {'name': '"""W"""', 'dtype': 'np.float32'}), "(W_shape, name='W', dtype=np.float32)\n", (15967, 16004), True, 'import ngraph as ng\n'), ((16023, 16072), 'ngraph.parameter', 'ng.parameter', (['R_shape'], {'name': '"""R"""', 'dtype': 'np.float32'}), "(R_shape, name='R', dtype=np.float32)\n", (16035, 16072), True, 'import ngraph as ng\n'), ((16091, 16140), 'ngraph.parameter', 'ng.parameter', (['B_shape'], {'name': '"""B"""', 'dtype': 'np.float32'}), "(B_shape, name='B', dtype=np.float32)\n", (16103, 16140), True, 'import ngraph as ng\n'), ((16192, 16287), 'ngraph.gru_cell', 'ng.gru_cell', (['parameter_X', 'parameter_H_t', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size'], {}), '(parameter_X, parameter_H_t, parameter_W, parameter_R,\n parameter_B, hidden_size)\n', (16203, 16287), True, 'import ngraph as ng\n'), ((16745, 16794), 'ngraph.parameter', 'ng.parameter', (['B_shape'], {'name': '"""B"""', 'dtype': 'np.float32'}), "(B_shape, name='B', dtype=np.float32)\n", (16757, 16794), True, 'import ngraph as ng\n'), ((16813, 16989), 'ngraph.gru_cell', 'ng.gru_cell', (['parameter_X', 'parameter_H_t', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'activations', 'activations_alpha', 'activations_beta', 'clip', 'linear_before_reset'], {}), '(parameter_X, parameter_H_t, parameter_W, parameter_R,\n parameter_B, hidden_size, activations, activations_alpha,\n activations_beta, clip, linear_before_reset)\n', (16824, 16989), True, 'import ngraph as ng\n'), ((17728, 17777), 'ngraph.parameter', 'ng.parameter', (['X_shape'], {'name': '"""X"""', 'dtype': 'np.float32'}), "(X_shape, name='X', dtype=np.float32)\n", (17740, 17777), True, 'import ngraph as ng\n'), ((17798, 17851), 
'ngraph.parameter', 'ng.parameter', (['H_t_shape'], {'name': '"""H_t"""', 'dtype': 'np.float32'}), "(H_t_shape, name='H_t', dtype=np.float32)\n", (17810, 17851), True, 'import ngraph as ng\n'), ((17870, 17919), 'ngraph.parameter', 'ng.parameter', (['W_shape'], {'name': '"""W"""', 'dtype': 'np.float32'}), "(W_shape, name='W', dtype=np.float32)\n", (17882, 17919), True, 'import ngraph as ng\n'), ((17938, 17987), 'ngraph.parameter', 'ng.parameter', (['R_shape'], {'name': '"""R"""', 'dtype': 'np.float32'}), "(R_shape, name='R', dtype=np.float32)\n", (17950, 17987), True, 'import ngraph as ng\n'), ((18006, 18055), 'ngraph.parameter', 'ng.parameter', (['B_shape'], {'name': '"""B"""', 'dtype': 'np.float32'}), "(B_shape, name='B', dtype=np.float32)\n", (18018, 18055), True, 'import ngraph as ng\n'), ((18216, 18339), 'ngraph.gru_sequence', 'ng.gru_sequence', (['parameter_X', 'parameter_H_t', 'seq_lengths', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction'], {}), '(parameter_X, parameter_H_t, seq_lengths, parameter_W,\n parameter_R, parameter_B, hidden_size, direction)\n', (18231, 18339), True, 'import ngraph as ng\n'), ((18960, 19009), 'ngraph.parameter', 'ng.parameter', (['B_shape'], {'name': '"""B"""', 'dtype': 'np.float32'}), "(B_shape, name='B', dtype=np.float32)\n", (18972, 19009), True, 'import ngraph as ng\n'), ((19028, 19232), 'ngraph.gru_sequence', 'ng.gru_sequence', (['parameter_X', 'parameter_H_t', 'seq_lengths', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction', 'activations', 'activations_alpha', 'activations_beta', 'clip', 'linear_before_reset'], {}), '(parameter_X, parameter_H_t, seq_lengths, parameter_W,\n parameter_R, parameter_B, hidden_size, direction, activations,\n activations_alpha, activations_beta, clip, linear_before_reset)\n', (19043, 19232), True, 'import ngraph as ng\n'), ((20049, 20098), 'ngraph.parameter', 'ng.parameter', (['X_shape'], {'name': '"""X"""', 'dtype': 'np.float32'}), "(X_shape, name='X', 
dtype=np.float32)\n", (20061, 20098), True, 'import ngraph as ng\n'), ((20119, 20172), 'ngraph.parameter', 'ng.parameter', (['H_t_shape'], {'name': '"""H_t"""', 'dtype': 'np.float32'}), "(H_t_shape, name='H_t', dtype=np.float32)\n", (20131, 20172), True, 'import ngraph as ng\n'), ((20191, 20240), 'ngraph.parameter', 'ng.parameter', (['W_shape'], {'name': '"""W"""', 'dtype': 'np.float32'}), "(W_shape, name='W', dtype=np.float32)\n", (20203, 20240), True, 'import ngraph as ng\n'), ((20259, 20308), 'ngraph.parameter', 'ng.parameter', (['R_shape'], {'name': '"""R"""', 'dtype': 'np.float32'}), "(R_shape, name='R', dtype=np.float32)\n", (20271, 20308), True, 'import ngraph as ng\n'), ((20327, 20376), 'ngraph.parameter', 'ng.parameter', (['B_shape'], {'name': '"""B"""', 'dtype': 'np.float32'}), "(B_shape, name='B', dtype=np.float32)\n", (20339, 20376), True, 'import ngraph as ng\n'), ((20537, 20660), 'ngraph.rnn_sequence', 'ng.rnn_sequence', (['parameter_X', 'parameter_H_t', 'seq_lengths', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction'], {}), '(parameter_X, parameter_H_t, seq_lengths, parameter_W,\n parameter_R, parameter_B, hidden_size, direction)\n', (20552, 20660), True, 'import ngraph as ng\n'), ((21093, 21276), 'ngraph.rnn_sequence', 'ng.rnn_sequence', (['parameter_X', 'parameter_H_t', 'seq_lengths', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction', 'activations', 'activations_alpha', 'activations_beta', 'clip'], {}), '(parameter_X, parameter_H_t, seq_lengths, parameter_W,\n parameter_R, parameter_B, hidden_size, direction, activations,\n activations_alpha, activations_beta, clip)\n', (21108, 21276), True, 'import ngraph as ng\n'), ((21703, 21733), 'ngraph.loop', 'ng.loop', (['trip_count', 'condition'], {}), '(trip_count, condition)\n', (21710, 21733), True, 'import ngraph as ng\n'), ((21824, 21868), 'ngraph.parameter', 'ng.parameter', (['[2, 3, 4, 5]'], {'dtype': 'np.float32'}), '([2, 3, 4, 5], dtype=np.float32)\n', 
(21836, 21868), True, 'import ngraph as ng\n'), ((21882, 21922), 'ngraph.parameter', 'ng.parameter', (['[150, 5]'], {'dtype': 'np.float32'}), '([150, 5], dtype=np.float32)\n', (21894, 21922), True, 'import ngraph as ng\n'), ((21934, 21987), 'ngraph.roi_pooling', 'ng.roi_pooling', (['inputs', 'coords', '[6, 6]', '(0.0625)', '"""Max"""'], {}), "(inputs, coords, [6, 6], 0.0625, 'Max')\n", (21948, 21987), True, 'import ngraph as ng\n'), ((22237, 22281), 'ngraph.parameter', 'ng.parameter', (['[1, 3, 4, 5]'], {'dtype': 'np.float32'}), '([1, 3, 4, 5], dtype=np.float32)\n', (22249, 22281), True, 'import ngraph as ng\n'), ((22295, 22335), 'ngraph.parameter', 'ng.parameter', (['[150, 5]'], {'dtype': 'np.float32'}), '([150, 5], dtype=np.float32)\n', (22307, 22335), True, 'import ngraph as ng\n'), ((22347, 22406), 'ngraph.psroi_pooling', 'ng.psroi_pooling', (['inputs', 'coords', '(2)', '(6)', '(0.0625)', '(0)', '(0)', '"""Avg"""'], {}), "(inputs, coords, 2, 6, 0.0625, 0, 0, 'Avg')\n", (22363, 22406), True, 'import ngraph as ng\n'), ((22660, 22717), 'ngraph.parameter', 'ng.parameter', (['[1, 2, 3, 4]'], {'name': '"""data"""', 'dtype': 'np.float32'}), "([1, 2, 3, 4], name='data', dtype=np.float32)\n", (22672, 22717), True, 'import ngraph as ng\n'), ((22729, 22758), 'ngraph.constant', 'ng.constant', (['(1)'], {'dtype': 'np.int8'}), '(1, dtype=np.int8)\n', (22740, 22758), True, 'import ngraph as ng\n'), ((22771, 22808), 'ngraph.convert_like', 'ng.convert_like', (['parameter_data', 'like'], {}), '(parameter_data, like)\n', (22786, 22808), True, 'import ngraph as ng\n'), ((23045, 23102), 'ngraph.parameter', 'ng.parameter', (['[4, 3, 2, 1]'], {'name': '"""data"""', 'dtype': 'np.float32'}), "([4, 3, 2, 1], name='data', dtype=np.float32)\n", (23057, 23102), True, 'import ngraph as ng\n'), ((23117, 23166), 'ngraph.parameter', 'ng.parameter', (['[5]'], {'name': '"""buckets"""', 'dtype': 'np.int64'}), "([5], name='buckets', dtype=np.int64)\n", (23129, 23166), True, 'import ngraph as 
ng\n'), ((23179, 23213), 'ngraph.bucketize', 'ng.bucketize', (['data', 'buckets', '"""i32"""'], {}), "(data, buckets, 'i32')\n", (23191, 23213), True, 'import ngraph as ng\n'), ((23451, 23513), 'ngraph.parameter', 'ng.parameter', (['[1, 125, 13, 13]'], {'name': '"""input"""', 'dtype': 'np.float32'}), "([1, 125, 13, 13], name='input', dtype=np.float32)\n", (23463, 23513), True, 'import ngraph as ng\n'), ((23660, 23756), 'ngraph.region_yolo', 'ng.region_yolo', (['data', 'num_coords', 'num_classes', 'num_regions', 'do_softmax', 'mask', 'axis', 'end_axis'], {}), '(data, num_coords, num_classes, num_regions, do_softmax, mask,\n axis, end_axis)\n', (23674, 23756), True, 'import ngraph as ng\n'), ((24007, 24066), 'ngraph.parameter', 'ng.parameter', (['[2, 24, 34, 62]'], {'name': '"""input"""', 'dtype': 'np.int32'}), "([2, 24, 34, 62], name='input', dtype=np.int32)\n", (24019, 24066), True, 'import ngraph as ng\n'), ((24096, 24123), 'ngraph.reorg_yolo', 'ng.reorg_yolo', (['data', 'stride'], {}), '(data, stride)\n', (24109, 24123), True, 'import ngraph as ng\n'), ((24385, 24441), 'ngraph.parameter', 'ng.parameter', (['[5, 2]'], {'name': '"""emb_table"""', 'dtype': 'np.float32'}), "([5, 2], name='emb_table', dtype=np.float32)\n", (24397, 24441), True, 'import ngraph as ng\n'), ((24456, 24505), 'ngraph.parameter', 'ng.parameter', (['[4]'], {'name': '"""indices"""', 'dtype': 'np.int64'}), "([4], name='indices', dtype=np.int64)\n", (24468, 24505), True, 'import ngraph as ng\n'), ((24520, 24569), 'ngraph.parameter', 'ng.parameter', (['[3]'], {'name': '"""offsets"""', 'dtype': 'np.int64'}), "([3], name='offsets', dtype=np.int64)\n", (24532, 24569), True, 'import ngraph as ng\n'), ((24590, 24644), 'ngraph.parameter', 'ng.parameter', (['[]'], {'name': '"""default_index"""', 'dtype': 'np.int64'}), "([], name='default_index', dtype=np.int64)\n", (24602, 24644), True, 'import ngraph as ng\n'), ((24657, 24729), 'ngraph.embedding_bag_offsets_sum', 'ng.embedding_bag_offsets_sum', 
(['emb_table', 'indices', 'offsets', 'default_index'], {}), '(emb_table, indices, offsets, default_index)\n', (24685, 24729), True, 'import ngraph as ng\n'), ((25001, 25057), 'ngraph.parameter', 'ng.parameter', (['[5, 2]'], {'name': '"""emb_table"""', 'dtype': 'np.float32'}), "([5, 2], name='emb_table', dtype=np.float32)\n", (25013, 25057), True, 'import ngraph as ng\n'), ((25072, 25121), 'ngraph.parameter', 'ng.parameter', (['[4]'], {'name': '"""indices"""', 'dtype': 'np.int64'}), "([4], name='indices', dtype=np.int64)\n", (25084, 25121), True, 'import ngraph as ng\n'), ((25140, 25193), 'ngraph.parameter', 'ng.parameter', (['[4]'], {'name': '"""segment_ids"""', 'dtype': 'np.int64'}), "([4], name='segment_ids', dtype=np.int64)\n", (25152, 25193), True, 'import ngraph as ng\n'), ((25213, 25266), 'ngraph.parameter', 'ng.parameter', (['[]'], {'name': '"""num_segments"""', 'dtype': 'np.int64'}), "([], name='num_segments', dtype=np.int64)\n", (25225, 25266), True, 'import ngraph as ng\n'), ((25287, 25341), 'ngraph.parameter', 'ng.parameter', (['[]'], {'name': '"""default_index"""', 'dtype': 'np.int64'}), "([], name='default_index', dtype=np.int64)\n", (25299, 25341), True, 'import ngraph as ng\n'), ((25367, 25429), 'ngraph.parameter', 'ng.parameter', (['[4]'], {'name': '"""per_sample_weights"""', 'dtype': 'np.float32'}), "([4], name='per_sample_weights', dtype=np.float32)\n", (25379, 25429), True, 'import ngraph as ng\n'), ((25442, 25553), 'ngraph.embedding_segments_sum', 'ng.embedding_segments_sum', (['emb_table', 'indices', 'segment_ids', 'num_segments', 'default_index', 'per_sample_weights'], {}), '(emb_table, indices, segment_ids, num_segments,\n default_index, per_sample_weights)\n', (25467, 25553), True, 'import ngraph as ng\n'), ((25870, 25926), 'ngraph.parameter', 'ng.parameter', (['[5, 2]'], {'name': '"""emb_table"""', 'dtype': 'np.float32'}), "([5, 2], name='emb_table', dtype=np.float32)\n", (25882, 25926), True, 'import ngraph as ng\n'), ((25941, 25990), 
'ngraph.parameter', 'ng.parameter', (['[4]'], {'name': '"""indices"""', 'dtype': 'np.int64'}), "([4], name='indices', dtype=np.int64)\n", (25953, 25990), True, 'import ngraph as ng\n'), ((26009, 26062), 'ngraph.parameter', 'ng.parameter', (['[4]'], {'name': '"""segment_ids"""', 'dtype': 'np.int64'}), "([4], name='segment_ids', dtype=np.int64)\n", (26021, 26062), True, 'import ngraph as ng\n'), ((26082, 26135), 'ngraph.parameter', 'ng.parameter', (['[]'], {'name': '"""num_segments"""', 'dtype': 'np.int64'}), "([], name='num_segments', dtype=np.int64)\n", (26094, 26135), True, 'import ngraph as ng\n'), ((26186, 26258), 'ngraph.embedding_segments_sum', 'ng.embedding_segments_sum', (['emb_table', 'indices', 'segment_ids', 'num_segments'], {}), '(emb_table, indices, segment_ids, num_segments)\n', (26211, 26258), True, 'import ngraph as ng\n'), ((26546, 26602), 'ngraph.parameter', 'ng.parameter', (['[5, 2]'], {'name': '"""emb_table"""', 'dtype': 'np.float32'}), "([5, 2], name='emb_table', dtype=np.float32)\n", (26558, 26602), True, 'import ngraph as ng\n'), ((26617, 26669), 'ngraph.parameter', 'ng.parameter', (['[3, 3]'], {'name': '"""indices"""', 'dtype': 'np.int64'}), "([3, 3], name='indices', dtype=np.int64)\n", (26629, 26669), True, 'import ngraph as ng\n'), ((26695, 26760), 'ngraph.parameter', 'ng.parameter', (['[3, 3]'], {'name': '"""per_sample_weights"""', 'dtype': 'np.float32'}), "([3, 3], name='per_sample_weights', dtype=np.float32)\n", (26707, 26760), True, 'import ngraph as ng\n'), ((26811, 26878), 'ngraph.embedding_bag_packed_sum', 'ng.embedding_bag_packed_sum', (['emb_table', 'indices', 'per_sample_weights'], {}), '(emb_table, indices, per_sample_weights)\n', (26838, 26878), True, 'import ngraph as ng\n'), ((27379, 27425), 'ngraph.parameter', 'ng.parameter', (['image_shape', 'dtype'], {'name': '"""Image"""'}), "(image_shape, dtype, name='Image')\n", (27391, 27425), True, 'import ngraph as ng\n'), ((27438, 27490), 'ngraph.interpolate', 'ng.interpolate', 
(['image_node', 'output_shape', 'attributes'], {}), '(image_node, output_shape, attributes)\n', (27452, 27490), True, 'import ngraph as ng\n'), ((28127, 28162), 'numpy.array', 'np.array', (['[64, 64]'], {'dtype': 'int_dtype'}), '([64, 64], dtype=int_dtype)\n', (28135, 28162), True, 'import numpy as np\n'), ((28464, 28514), 'ngraph.prior_box', 'ng.prior_box', (['layer_shape', 'image_shape', 'attributes'], {}), '(layer_shape, image_shape, attributes)\n', (28476, 28514), True, 'import ngraph as ng\n'), ((29117, 29152), 'numpy.array', 'np.array', (['[64, 64]'], {'dtype': 'int_dtype'}), '([64, 64], dtype=int_dtype)\n', (29125, 29152), True, 'import numpy as np\n'), ((29423, 29482), 'ngraph.prior_box_clustered', 'ng.prior_box_clustered', (['output_size', 'image_size', 'attributes'], {}), '(output_size, image_size, attributes)\n', (29445, 29482), True, 'import ngraph as ng\n'), ((30251, 30301), 'ngraph.parameter', 'ng.parameter', (['[4, 1, 5, 5]', 'fp_dtype', '"""box_logits"""'], {}), "([4, 1, 5, 5], fp_dtype, 'box_logits')\n", (30263, 30301), True, 'import ngraph as ng\n'), ((30320, 30371), 'ngraph.parameter', 'ng.parameter', (['[2, 1, 4, 5]', 'fp_dtype', '"""class_preds"""'], {}), "([2, 1, 4, 5], fp_dtype, 'class_preds')\n", (30332, 30371), True, 'import ngraph as ng\n'), ((30388, 30437), 'ngraph.parameter', 'ng.parameter', (['[2, 1, 4, 5]', 'fp_dtype', '"""proposals"""'], {}), "([2, 1, 4, 5], fp_dtype, 'proposals')\n", (30400, 30437), True, 'import ngraph as ng\n'), ((30460, 30515), 'ngraph.parameter', 'ng.parameter', (['[2, 1, 4, 5]', 'fp_dtype', '"""aux_class_preds"""'], {}), "([2, 1, 4, 5], fp_dtype, 'aux_class_preds')\n", (30472, 30515), True, 'import ngraph as ng\n'), ((30536, 30589), 'ngraph.parameter', 'ng.parameter', (['[2, 1, 4, 5]', 'fp_dtype', '"""aux_box_preds"""'], {}), "([2, 1, 4, 5], fp_dtype, 'aux_box_preds')\n", (30548, 30589), True, 'import ngraph as ng\n'), ((30602, 30705), 'ngraph.detection_output', 'ng.detection_output', (['box_logits', 
'class_preds', 'proposals', 'attributes', 'aux_class_preds', 'aux_box_preds'], {}), '(box_logits, class_preds, proposals, attributes,\n aux_class_preds, aux_box_preds)\n', (30621, 30705), True, 'import ngraph as ng\n'), ((31572, 31635), 'ngraph.parameter', 'ng.parameter', (['[batch_size, 12, 34, 62]', 'fp_dtype', '"""class_probs"""'], {}), "([batch_size, 12, 34, 62], fp_dtype, 'class_probs')\n", (31584, 31635), True, 'import ngraph as ng\n'), ((31654, 31717), 'ngraph.parameter', 'ng.parameter', (['[batch_size, 24, 34, 62]', 'fp_dtype', '"""bbox_deltas"""'], {}), "([batch_size, 24, 34, 62], fp_dtype, 'bbox_deltas')\n", (31666, 31717), True, 'import ngraph as ng\n'), ((31736, 31778), 'ngraph.parameter', 'ng.parameter', (['[3]', 'fp_dtype', '"""image_shape"""'], {}), "([3], fp_dtype, 'image_shape')\n", (31748, 31778), True, 'import ngraph as ng\n'), ((31790, 31852), 'ngraph.proposal', 'ng.proposal', (['class_probs', 'bbox_deltas', 'image_shape', 'attributes'], {}), '(class_probs, bbox_deltas, image_shape, attributes)\n', (31801, 31852), True, 'import ngraph as ng\n'), ((32379, 32417), 'ngraph.parameter', 'ng.parameter', (['[]', 'np.int32', '"""timestep"""'], {}), "([], np.int32, 'timestep')\n", (32391, 32417), True, 'import ngraph as ng\n'), ((32437, 32483), 'ngraph.parameter', 'ng.parameter', (['[1, 2, 2]', 'np.float32', '"""body_in"""'], {}), "([1, 2, 2], np.float32, 'body_in')\n", (32449, 32483), True, 'import ngraph as ng\n'), ((32504, 32553), 'ngraph.parameter', 'ng.parameter', (['[2, 2]', 'np.float32', '"""body_prev_cma"""'], {}), "([2, 2], np.float32, 'body_prev_cma')\n", (32516, 32553), True, 'import ngraph as ng\n'), ((32575, 32619), 'ngraph.parameter', 'ng.parameter', (['[]', 'np.int32', '"""body_const_one"""'], {}), "([], np.int32, 'body_const_one')\n", (32587, 32619), True, 'import ngraph as ng\n'), ((32825, 32862), 'ngraph.add', 'ng.add', (['body_const_one', 'body_timestep'], {}), '(body_const_one, body_timestep)\n', (32831, 32862), True, 'import ngraph 
as ng\n'), ((32946, 32973), 'ngraph.unsqueeze', 'ng.unsqueeze', (['curr_cma', '[0]'], {}), '(curr_cma, [0])\n', (32958, 32973), True, 'import ngraph as ng\n'), ((33002, 33046), 'ngraph.parameter', 'ng.parameter', (['[16, 2, 2]', 'np.float32', '"""data"""'], {}), "([16, 2, 2], np.float32, 'data')\n", (33014, 33046), True, 'import ngraph as ng\n'), ((33081, 33111), 'ngraph.constant', 'ng.constant', (['(0)'], {'dtype': 'np.int32'}), '(0, dtype=np.int32)\n', (33092, 33111), True, 'import ngraph as ng\n'), ((33122, 33152), 'ngraph.constant', 'ng.constant', (['(1)'], {'dtype': 'np.int32'}), '(1, dtype=np.int32)\n', (33133, 33152), True, 'import ngraph as ng\n'), ((33363, 33461), 'ngraph.utils.tensor_iterator_types.GraphBody', 'GraphBody', (['[body_timestep, body_data_in, body_prev_cma, body_const_one]', '[curr_cma, cma_hist]'], {}), '([body_timestep, body_data_in, body_prev_cma, body_const_one], [\n curr_cma, cma_hist])\n', (33372, 33461), False, 'from ngraph.utils.tensor_iterator_types import GraphBody, TensorIteratorSliceInputDesc, TensorIteratorMergedInputDesc, TensorIteratorInvariantInputDesc, TensorIteratorBodyOutputDesc, TensorIteratorConcatOutputDesc\n'), ((34200, 34361), 'ngraph.tensor_iterator', 'ng.tensor_iterator', (['ti_inputs', 'graph_body', 'ti_slice_input_desc', 'ti_merged_input_desc', 'ti_invariant_input_desc', 'ti_body_output_desc', 'ti_concat_output_desc'], {}), '(ti_inputs, graph_body, ti_slice_input_desc,\n ti_merged_input_desc, ti_invariant_input_desc, ti_body_output_desc,\n ti_concat_output_desc)\n', (34218, 34361), True, 'import ngraph as ng\n'), ((34697, 34752), 'ngraph.parameter', 'ng.parameter', (['[2, 2]'], {'name': '"""init_value"""', 'dtype': 'np.int32'}), "([2, 2], name='init_value', dtype=np.int32)\n", (34709, 34752), True, 'import ngraph as ng\n'), ((34765, 34804), 'ngraph.read_value', 'ng.read_value', (['init_value', '"""var_id_667"""'], {}), "(init_value, 'var_id_667')\n", (34778, 34804), True, 'import ngraph as ng\n'), ((35037, 35092), 
'ngraph.parameter', 'ng.parameter', (['[5, 7]'], {'name': '"""input_data"""', 'dtype': 'np.int32'}), "([5, 7], name='input_data', dtype=np.int32)\n", (35049, 35092), True, 'import ngraph as ng\n'), ((35102, 35141), 'ngraph.read_value', 'ng.read_value', (['input_data', '"""var_id_667"""'], {}), "(input_data, 'var_id_667')\n", (35115, 35141), True, 'import ngraph as ng\n'), ((35153, 35180), 'ngraph.assign', 'ng.assign', (['rv', '"""var_id_667"""'], {}), "(rv, 'var_id_667')\n", (35162, 35180), True, 'import ngraph as ng\n'), ((35420, 35479), 'ngraph.parameter', 'ng.parameter', (['[64, 3, 10, 10]'], {'name': '"""image"""', 'dtype': 'np.int32'}), "([64, 3, 10, 10], name='image', dtype=np.int32)\n", (35432, 35479), True, 'import ngraph as ng\n'), ((35572, 35635), 'ngraph.extract_image_patches', 'ng.extract_image_patches', (['image', 'sizes', 'strides', 'rates', 'padding'], {}), '(image, sizes, strides, rates, padding)\n', (35596, 35635), True, 'import ngraph as ng\n'), ((36456, 36500), 'ngraph.parameter', 'ng.parameter', (['X_shape'], {'name': '"""X"""', 'dtype': 'dtype'}), "(X_shape, name='X', dtype=dtype)\n", (36468, 36500), True, 'import ngraph as ng\n'), ((36521, 36569), 'ngraph.parameter', 'ng.parameter', (['H_t_shape'], {'name': '"""H_t"""', 'dtype': 'dtype'}), "(H_t_shape, name='H_t', dtype=dtype)\n", (36533, 36569), True, 'import ngraph as ng\n'), ((36590, 36638), 'ngraph.parameter', 'ng.parameter', (['C_t_shape'], {'name': '"""C_t"""', 'dtype': 'dtype'}), "(C_t_shape, name='C_t', dtype=dtype)\n", (36602, 36638), True, 'import ngraph as ng\n'), ((36663, 36722), 'ngraph.parameter', 'ng.parameter', (['seq_len_shape'], {'name': '"""seq_len"""', 'dtype': 'np.int32'}), "(seq_len_shape, name='seq_len', dtype=np.int32)\n", (36675, 36722), True, 'import ngraph as ng\n'), ((36741, 36785), 'ngraph.parameter', 'ng.parameter', (['W_shape'], {'name': '"""W"""', 'dtype': 'dtype'}), "(W_shape, name='W', dtype=dtype)\n", (36753, 36785), True, 'import ngraph as ng\n'), ((36804, 
36848), 'ngraph.parameter', 'ng.parameter', (['R_shape'], {'name': '"""R"""', 'dtype': 'dtype'}), "(R_shape, name='R', dtype=dtype)\n", (36816, 36848), True, 'import ngraph as ng\n'), ((36867, 36911), 'ngraph.parameter', 'ng.parameter', (['B_shape'], {'name': '"""B"""', 'dtype': 'dtype'}), "(B_shape, name='B', dtype=dtype)\n", (36879, 36911), True, 'import ngraph as ng\n'), ((36956, 37105), 'ngraph.lstm_sequence', 'ng.lstm_sequence', (['parameter_X', 'parameter_H_t', 'parameter_C_t', 'parameter_seq_len', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction'], {}), '(parameter_X, parameter_H_t, parameter_C_t,\n parameter_seq_len, parameter_W, parameter_R, parameter_B, hidden_size,\n direction)\n', (36972, 37105), True, 'import ngraph as ng\n'), ((37425, 37628), 'ngraph.lstm_sequence', 'ng.lstm_sequence', (['parameter_X', 'parameter_H_t', 'parameter_C_t', 'parameter_seq_len', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction', 'activations', 'activation_alpha', 'activation_beta', 'clip'], {}), '(parameter_X, parameter_H_t, parameter_C_t,\n parameter_seq_len, parameter_W, parameter_R, parameter_B, hidden_size,\n direction, activations, activation_alpha, activation_beta, clip)\n', (37441, 37628), True, 'import ngraph as ng\n'), ((38433, 38477), 'ngraph.parameter', 'ng.parameter', (['X_shape'], {'name': '"""X"""', 'dtype': 'dtype'}), "(X_shape, name='X', dtype=dtype)\n", (38445, 38477), True, 'import ngraph as ng\n'), ((38498, 38546), 'ngraph.parameter', 'ng.parameter', (['H_t_shape'], {'name': '"""H_t"""', 'dtype': 'dtype'}), "(H_t_shape, name='H_t', dtype=dtype)\n", (38510, 38546), True, 'import ngraph as ng\n'), ((38567, 38615), 'ngraph.parameter', 'ng.parameter', (['C_t_shape'], {'name': '"""C_t"""', 'dtype': 'dtype'}), "(C_t_shape, name='C_t', dtype=dtype)\n", (38579, 38615), True, 'import ngraph as ng\n'), ((38640, 38699), 'ngraph.parameter', 'ng.parameter', (['seq_len_shape'], {'name': '"""seq_len"""', 'dtype': 'np.int32'}), 
"(seq_len_shape, name='seq_len', dtype=np.int32)\n", (38652, 38699), True, 'import ngraph as ng\n'), ((38718, 38762), 'ngraph.parameter', 'ng.parameter', (['W_shape'], {'name': '"""W"""', 'dtype': 'dtype'}), "(W_shape, name='W', dtype=dtype)\n", (38730, 38762), True, 'import ngraph as ng\n'), ((38781, 38825), 'ngraph.parameter', 'ng.parameter', (['R_shape'], {'name': '"""R"""', 'dtype': 'dtype'}), "(R_shape, name='R', dtype=dtype)\n", (38793, 38825), True, 'import ngraph as ng\n'), ((38844, 38888), 'ngraph.parameter', 'ng.parameter', (['B_shape'], {'name': '"""B"""', 'dtype': 'dtype'}), "(B_shape, name='B', dtype=dtype)\n", (38856, 38888), True, 'import ngraph as ng\n'), ((38936, 39085), 'ngraph.lstm_sequence', 'ng.lstm_sequence', (['parameter_X', 'parameter_H_t', 'parameter_C_t', 'parameter_seq_len', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction'], {}), '(parameter_X, parameter_H_t, parameter_C_t,\n parameter_seq_len, parameter_W, parameter_R, parameter_B, hidden_size,\n direction)\n', (38952, 39085), True, 'import ngraph as ng\n'), ((39421, 39624), 'ngraph.lstm_sequence', 'ng.lstm_sequence', (['parameter_X', 'parameter_H_t', 'parameter_C_t', 'parameter_seq_len', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction', 'activations', 'activation_alpha', 'activation_beta', 'clip'], {}), '(parameter_X, parameter_H_t, parameter_C_t,\n parameter_seq_len, parameter_W, parameter_R, parameter_B, hidden_size,\n direction, activations, activation_alpha, activation_beta, clip)\n', (39437, 39624), True, 'import ngraph as ng\n'), ((40429, 40473), 'ngraph.parameter', 'ng.parameter', (['X_shape'], {'name': '"""X"""', 'dtype': 'dtype'}), "(X_shape, name='X', dtype=dtype)\n", (40441, 40473), True, 'import ngraph as ng\n'), ((40494, 40542), 'ngraph.parameter', 'ng.parameter', (['H_t_shape'], {'name': '"""H_t"""', 'dtype': 'dtype'}), "(H_t_shape, name='H_t', dtype=dtype)\n", (40506, 40542), True, 'import ngraph as ng\n'), ((40563, 40611), 
'ngraph.parameter', 'ng.parameter', (['C_t_shape'], {'name': '"""C_t"""', 'dtype': 'dtype'}), "(C_t_shape, name='C_t', dtype=dtype)\n", (40575, 40611), True, 'import ngraph as ng\n'), ((40636, 40695), 'ngraph.parameter', 'ng.parameter', (['seq_len_shape'], {'name': '"""seq_len"""', 'dtype': 'np.int32'}), "(seq_len_shape, name='seq_len', dtype=np.int32)\n", (40648, 40695), True, 'import ngraph as ng\n'), ((40714, 40758), 'ngraph.parameter', 'ng.parameter', (['W_shape'], {'name': '"""W"""', 'dtype': 'dtype'}), "(W_shape, name='W', dtype=dtype)\n", (40726, 40758), True, 'import ngraph as ng\n'), ((40777, 40821), 'ngraph.parameter', 'ng.parameter', (['R_shape'], {'name': '"""R"""', 'dtype': 'dtype'}), "(R_shape, name='R', dtype=dtype)\n", (40789, 40821), True, 'import ngraph as ng\n'), ((40840, 40884), 'ngraph.parameter', 'ng.parameter', (['B_shape'], {'name': '"""B"""', 'dtype': 'dtype'}), "(B_shape, name='B', dtype=dtype)\n", (40852, 40884), True, 'import ngraph as ng\n'), ((40932, 41081), 'ngraph.lstm_sequence', 'ng.lstm_sequence', (['parameter_X', 'parameter_H_t', 'parameter_C_t', 'parameter_seq_len', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction'], {}), '(parameter_X, parameter_H_t, parameter_C_t,\n parameter_seq_len, parameter_W, parameter_R, parameter_B, hidden_size,\n direction)\n', (40948, 41081), True, 'import ngraph as ng\n'), ((41390, 41593), 'ngraph.lstm_sequence', 'ng.lstm_sequence', (['parameter_X', 'parameter_H_t', 'parameter_C_t', 'parameter_seq_len', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction', 'activations', 'activation_alpha', 'activation_beta', 'clip'], {}), '(parameter_X, parameter_H_t, parameter_C_t,\n parameter_seq_len, parameter_W, parameter_R, parameter_B, hidden_size,\n direction, activations, activation_alpha, activation_beta, clip)\n', (41406, 41593), True, 'import ngraph as ng\n'), ((42336, 42380), 'ngraph.parameter', 'ng.parameter', (['X_shape'], {'name': '"""X"""', 'dtype': 'dtype'}), 
"(X_shape, name='X', dtype=dtype)\n", (42348, 42380), True, 'import ngraph as ng\n'), ((42401, 42449), 'ngraph.parameter', 'ng.parameter', (['H_t_shape'], {'name': '"""H_t"""', 'dtype': 'dtype'}), "(H_t_shape, name='H_t', dtype=dtype)\n", (42413, 42449), True, 'import ngraph as ng\n'), ((42474, 42533), 'ngraph.parameter', 'ng.parameter', (['seq_len_shape'], {'name': '"""seq_len"""', 'dtype': 'np.int32'}), "(seq_len_shape, name='seq_len', dtype=np.int32)\n", (42486, 42533), True, 'import ngraph as ng\n'), ((42552, 42596), 'ngraph.parameter', 'ng.parameter', (['W_shape'], {'name': '"""W"""', 'dtype': 'dtype'}), "(W_shape, name='W', dtype=dtype)\n", (42564, 42596), True, 'import ngraph as ng\n'), ((42615, 42659), 'ngraph.parameter', 'ng.parameter', (['R_shape'], {'name': '"""R"""', 'dtype': 'dtype'}), "(R_shape, name='R', dtype=dtype)\n", (42627, 42659), True, 'import ngraph as ng\n'), ((42678, 42722), 'ngraph.parameter', 'ng.parameter', (['B_shape'], {'name': '"""B"""', 'dtype': 'dtype'}), "(B_shape, name='B', dtype=dtype)\n", (42690, 42722), True, 'import ngraph as ng\n'), ((42767, 42896), 'ngraph.gru_sequence', 'ng.gru_sequence', (['parameter_X', 'parameter_H_t', 'parameter_seq_len', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction'], {}), '(parameter_X, parameter_H_t, parameter_seq_len, parameter_W,\n parameter_R, parameter_B, hidden_size, direction)\n', (42782, 42896), True, 'import ngraph as ng\n'), ((43279, 43323), 'ngraph.parameter', 'ng.parameter', (['B_shape'], {'name': '"""B"""', 'dtype': 'dtype'}), "(B_shape, name='B', dtype=dtype)\n", (43291, 43323), True, 'import ngraph as ng\n'), ((43342, 43550), 'ngraph.gru_sequence', 'ng.gru_sequence', (['parameter_X', 'parameter_H_t', 'parameter_seq_len', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction', 'activations', 'activation_alpha', 'activation_beta', 'clip', 'linear_before_reset'], {}), '(parameter_X, parameter_H_t, parameter_seq_len, parameter_W,\n parameter_R, 
parameter_B, hidden_size, direction, activations,\n activation_alpha, activation_beta, clip, linear_before_reset)\n', (43357, 43550), True, 'import ngraph as ng\n'), ((44294, 44338), 'ngraph.parameter', 'ng.parameter', (['X_shape'], {'name': '"""X"""', 'dtype': 'dtype'}), "(X_shape, name='X', dtype=dtype)\n", (44306, 44338), True, 'import ngraph as ng\n'), ((44359, 44407), 'ngraph.parameter', 'ng.parameter', (['H_t_shape'], {'name': '"""H_t"""', 'dtype': 'dtype'}), "(H_t_shape, name='H_t', dtype=dtype)\n", (44371, 44407), True, 'import ngraph as ng\n'), ((44432, 44491), 'ngraph.parameter', 'ng.parameter', (['seq_len_shape'], {'name': '"""seq_len"""', 'dtype': 'np.int32'}), "(seq_len_shape, name='seq_len', dtype=np.int32)\n", (44444, 44491), True, 'import ngraph as ng\n'), ((44510, 44554), 'ngraph.parameter', 'ng.parameter', (['W_shape'], {'name': '"""W"""', 'dtype': 'dtype'}), "(W_shape, name='W', dtype=dtype)\n", (44522, 44554), True, 'import ngraph as ng\n'), ((44573, 44617), 'ngraph.parameter', 'ng.parameter', (['R_shape'], {'name': '"""R"""', 'dtype': 'dtype'}), "(R_shape, name='R', dtype=dtype)\n", (44585, 44617), True, 'import ngraph as ng\n'), ((44636, 44680), 'ngraph.parameter', 'ng.parameter', (['B_shape'], {'name': '"""B"""', 'dtype': 'dtype'}), "(B_shape, name='B', dtype=dtype)\n", (44648, 44680), True, 'import ngraph as ng\n'), ((44728, 44857), 'ngraph.gru_sequence', 'ng.gru_sequence', (['parameter_X', 'parameter_H_t', 'parameter_seq_len', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction'], {}), '(parameter_X, parameter_H_t, parameter_seq_len, parameter_W,\n parameter_R, parameter_B, hidden_size, direction)\n', (44743, 44857), True, 'import ngraph as ng\n'), ((45256, 45300), 'ngraph.parameter', 'ng.parameter', (['B_shape'], {'name': '"""B"""', 'dtype': 'dtype'}), "(B_shape, name='B', dtype=dtype)\n", (45268, 45300), True, 'import ngraph as ng\n'), ((45319, 45527), 'ngraph.gru_sequence', 'ng.gru_sequence', (['parameter_X', 
'parameter_H_t', 'parameter_seq_len', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction', 'activations', 'activation_alpha', 'activation_beta', 'clip', 'linear_before_reset'], {}), '(parameter_X, parameter_H_t, parameter_seq_len, parameter_W,\n parameter_R, parameter_B, hidden_size, direction, activations,\n activation_alpha, activation_beta, clip, linear_before_reset)\n', (45334, 45527), True, 'import ngraph as ng\n'), ((46271, 46315), 'ngraph.parameter', 'ng.parameter', (['X_shape'], {'name': '"""X"""', 'dtype': 'dtype'}), "(X_shape, name='X', dtype=dtype)\n", (46283, 46315), True, 'import ngraph as ng\n'), ((46336, 46384), 'ngraph.parameter', 'ng.parameter', (['H_t_shape'], {'name': '"""H_t"""', 'dtype': 'dtype'}), "(H_t_shape, name='H_t', dtype=dtype)\n", (46348, 46384), True, 'import ngraph as ng\n'), ((46409, 46468), 'ngraph.parameter', 'ng.parameter', (['seq_len_shape'], {'name': '"""seq_len"""', 'dtype': 'np.int32'}), "(seq_len_shape, name='seq_len', dtype=np.int32)\n", (46421, 46468), True, 'import ngraph as ng\n'), ((46487, 46531), 'ngraph.parameter', 'ng.parameter', (['W_shape'], {'name': '"""W"""', 'dtype': 'dtype'}), "(W_shape, name='W', dtype=dtype)\n", (46499, 46531), True, 'import ngraph as ng\n'), ((46550, 46594), 'ngraph.parameter', 'ng.parameter', (['R_shape'], {'name': '"""R"""', 'dtype': 'dtype'}), "(R_shape, name='R', dtype=dtype)\n", (46562, 46594), True, 'import ngraph as ng\n'), ((46613, 46657), 'ngraph.parameter', 'ng.parameter', (['B_shape'], {'name': '"""B"""', 'dtype': 'dtype'}), "(B_shape, name='B', dtype=dtype)\n", (46625, 46657), True, 'import ngraph as ng\n'), ((46705, 46834), 'ngraph.gru_sequence', 'ng.gru_sequence', (['parameter_X', 'parameter_H_t', 'parameter_seq_len', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction'], {}), '(parameter_X, parameter_H_t, parameter_seq_len, parameter_W,\n parameter_R, parameter_B, hidden_size, direction)\n', (46720, 46834), True, 'import ngraph as ng\n'), 
((47212, 47256), 'ngraph.parameter', 'ng.parameter', (['B_shape'], {'name': '"""B"""', 'dtype': 'dtype'}), "(B_shape, name='B', dtype=dtype)\n", (47224, 47256), True, 'import ngraph as ng\n'), ((47269, 47477), 'ngraph.gru_sequence', 'ng.gru_sequence', (['parameter_X', 'parameter_H_t', 'parameter_seq_len', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction', 'activations', 'activation_alpha', 'activation_beta', 'clip', 'linear_before_reset'], {}), '(parameter_X, parameter_H_t, parameter_seq_len, parameter_W,\n parameter_R, parameter_B, hidden_size, direction, activations,\n activation_alpha, activation_beta, clip, linear_before_reset)\n', (47284, 47477), True, 'import ngraph as ng\n'), ((48206, 48250), 'ngraph.parameter', 'ng.parameter', (['X_shape'], {'name': '"""X"""', 'dtype': 'dtype'}), "(X_shape, name='X', dtype=dtype)\n", (48218, 48250), True, 'import ngraph as ng\n'), ((48271, 48319), 'ngraph.parameter', 'ng.parameter', (['H_t_shape'], {'name': '"""H_t"""', 'dtype': 'dtype'}), "(H_t_shape, name='H_t', dtype=dtype)\n", (48283, 48319), True, 'import ngraph as ng\n'), ((48344, 48403), 'ngraph.parameter', 'ng.parameter', (['seq_len_shape'], {'name': '"""seq_len"""', 'dtype': 'np.int32'}), "(seq_len_shape, name='seq_len', dtype=np.int32)\n", (48356, 48403), True, 'import ngraph as ng\n'), ((48422, 48466), 'ngraph.parameter', 'ng.parameter', (['W_shape'], {'name': '"""W"""', 'dtype': 'dtype'}), "(W_shape, name='W', dtype=dtype)\n", (48434, 48466), True, 'import ngraph as ng\n'), ((48485, 48529), 'ngraph.parameter', 'ng.parameter', (['R_shape'], {'name': '"""R"""', 'dtype': 'dtype'}), "(R_shape, name='R', dtype=dtype)\n", (48497, 48529), True, 'import ngraph as ng\n'), ((48548, 48592), 'ngraph.parameter', 'ng.parameter', (['B_shape'], {'name': '"""B"""', 'dtype': 'dtype'}), "(B_shape, name='B', dtype=dtype)\n", (48560, 48592), True, 'import ngraph as ng\n'), ((48637, 48766), 'ngraph.rnn_sequence', 'ng.rnn_sequence', (['parameter_X', 
'parameter_H_t', 'parameter_seq_len', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction'], {}), '(parameter_X, parameter_H_t, parameter_seq_len, parameter_W,\n parameter_R, parameter_B, hidden_size, direction)\n', (48652, 48766), True, 'import ngraph as ng\n'), ((49070, 49257), 'ngraph.rnn_sequence', 'ng.rnn_sequence', (['parameter_X', 'parameter_H_t', 'parameter_seq_len', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction', 'activations', 'activation_alpha', 'activation_beta', 'clip'], {}), '(parameter_X, parameter_H_t, parameter_seq_len, parameter_W,\n parameter_R, parameter_B, hidden_size, direction, activations,\n activation_alpha, activation_beta, clip)\n', (49085, 49257), True, 'import ngraph as ng\n'), ((49982, 50026), 'ngraph.parameter', 'ng.parameter', (['X_shape'], {'name': '"""X"""', 'dtype': 'dtype'}), "(X_shape, name='X', dtype=dtype)\n", (49994, 50026), True, 'import ngraph as ng\n'), ((50047, 50095), 'ngraph.parameter', 'ng.parameter', (['H_t_shape'], {'name': '"""H_t"""', 'dtype': 'dtype'}), "(H_t_shape, name='H_t', dtype=dtype)\n", (50059, 50095), True, 'import ngraph as ng\n'), ((50120, 50179), 'ngraph.parameter', 'ng.parameter', (['seq_len_shape'], {'name': '"""seq_len"""', 'dtype': 'np.int32'}), "(seq_len_shape, name='seq_len', dtype=np.int32)\n", (50132, 50179), True, 'import ngraph as ng\n'), ((50198, 50242), 'ngraph.parameter', 'ng.parameter', (['W_shape'], {'name': '"""W"""', 'dtype': 'dtype'}), "(W_shape, name='W', dtype=dtype)\n", (50210, 50242), True, 'import ngraph as ng\n'), ((50261, 50305), 'ngraph.parameter', 'ng.parameter', (['R_shape'], {'name': '"""R"""', 'dtype': 'dtype'}), "(R_shape, name='R', dtype=dtype)\n", (50273, 50305), True, 'import ngraph as ng\n'), ((50324, 50368), 'ngraph.parameter', 'ng.parameter', (['B_shape'], {'name': '"""B"""', 'dtype': 'dtype'}), "(B_shape, name='B', dtype=dtype)\n", (50336, 50368), True, 'import ngraph as ng\n'), ((50416, 50545), 'ngraph.rnn_sequence', 
'ng.rnn_sequence', (['parameter_X', 'parameter_H_t', 'parameter_seq_len', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction'], {}), '(parameter_X, parameter_H_t, parameter_seq_len, parameter_W,\n parameter_R, parameter_B, hidden_size, direction)\n', (50431, 50545), True, 'import ngraph as ng\n'), ((50865, 51052), 'ngraph.rnn_sequence', 'ng.rnn_sequence', (['parameter_X', 'parameter_H_t', 'parameter_seq_len', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction', 'activations', 'activation_alpha', 'activation_beta', 'clip'], {}), '(parameter_X, parameter_H_t, parameter_seq_len, parameter_W,\n parameter_R, parameter_B, hidden_size, direction, activations,\n activation_alpha, activation_beta, clip)\n', (50880, 51052), True, 'import ngraph as ng\n'), ((51777, 51821), 'ngraph.parameter', 'ng.parameter', (['X_shape'], {'name': '"""X"""', 'dtype': 'dtype'}), "(X_shape, name='X', dtype=dtype)\n", (51789, 51821), True, 'import ngraph as ng\n'), ((51842, 51890), 'ngraph.parameter', 'ng.parameter', (['H_t_shape'], {'name': '"""H_t"""', 'dtype': 'dtype'}), "(H_t_shape, name='H_t', dtype=dtype)\n", (51854, 51890), True, 'import ngraph as ng\n'), ((51915, 51974), 'ngraph.parameter', 'ng.parameter', (['seq_len_shape'], {'name': '"""seq_len"""', 'dtype': 'np.int32'}), "(seq_len_shape, name='seq_len', dtype=np.int32)\n", (51927, 51974), True, 'import ngraph as ng\n'), ((51993, 52037), 'ngraph.parameter', 'ng.parameter', (['W_shape'], {'name': '"""W"""', 'dtype': 'dtype'}), "(W_shape, name='W', dtype=dtype)\n", (52005, 52037), True, 'import ngraph as ng\n'), ((52056, 52100), 'ngraph.parameter', 'ng.parameter', (['R_shape'], {'name': '"""R"""', 'dtype': 'dtype'}), "(R_shape, name='R', dtype=dtype)\n", (52068, 52100), True, 'import ngraph as ng\n'), ((52119, 52163), 'ngraph.parameter', 'ng.parameter', (['B_shape'], {'name': '"""B"""', 'dtype': 'dtype'}), "(B_shape, name='B', dtype=dtype)\n", (52131, 52163), True, 'import ngraph as ng\n'), 
((52211, 52340), 'ngraph.rnn_sequence', 'ng.rnn_sequence', (['parameter_X', 'parameter_H_t', 'parameter_seq_len', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction'], {}), '(parameter_X, parameter_H_t, parameter_seq_len, parameter_W,\n parameter_R, parameter_B, hidden_size, direction)\n', (52226, 52340), True, 'import ngraph as ng\n'), ((52633, 52820), 'ngraph.rnn_sequence', 'ng.rnn_sequence', (['parameter_X', 'parameter_H_t', 'parameter_seq_len', 'parameter_W', 'parameter_R', 'parameter_B', 'hidden_size', 'direction', 'activations', 'activation_alpha', 'activation_beta', 'clip'], {}), '(parameter_X, parameter_H_t, parameter_seq_len, parameter_W,\n parameter_R, parameter_B, hidden_size, direction, activations,\n activation_alpha, activation_beta, clip)\n', (52648, 52820), True, 'import ngraph as ng\n'), ((25718, 25739), '_pyngraph.PartialShape', 'PartialShape', (['[-1, 2]'], {}), '([-1, 2])\n', (25730, 25739), False, 'from _pyngraph import PartialShape\n'), ((26413, 26434), '_pyngraph.PartialShape', 'PartialShape', (['[-1, 2]'], {}), '([-1, 2])\n', (26425, 26434), False, 'from _pyngraph import PartialShape\n'), ((27324, 27353), 'numpy.array', 'np.array', (['[2, 2]'], {'dtype': 'dtype'}), '([2, 2], dtype=dtype)\n', (27332, 27353), True, 'import numpy as np\n'), ((28233, 28265), 'numpy.array', 'np.array', (['[2, 3]'], {'dtype': 'fp_dtype'}), '([2, 3], dtype=fp_dtype)\n', (28241, 28265), True, 'import numpy as np\n'), ((28291, 28332), 'numpy.array', 'np.array', (['[1.5, 2.0, 2.5]'], {'dtype': 'fp_dtype'}), '([1.5, 2.0, 2.5], dtype=fp_dtype)\n', (28299, 28332), True, 'import numpy as np\n'), ((28404, 28439), 'numpy.array', 'np.array', (['[32, 32]'], {'dtype': 'int_dtype'}), '([32, 32], dtype=int_dtype)\n', (28412, 28439), True, 'import numpy as np\n'), ((29222, 29263), 'numpy.array', 'np.array', (['[4.0, 2.0, 3.2]'], {'dtype': 'fp_dtype'}), '([4.0, 2.0, 3.2], dtype=fp_dtype)\n', (29230, 29263), True, 'import numpy as np\n'), ((29283, 29324), 
'numpy.array', 'np.array', (['[1.0, 2.0, 1.0]'], {'dtype': 'fp_dtype'}), '([1.0, 2.0, 1.0], dtype=fp_dtype)\n', (29291, 29324), True, 'import numpy as np\n'), ((29363, 29398), 'numpy.array', 'np.array', (['[19, 19]'], {'dtype': 'int_dtype'}), '([19, 19], dtype=int_dtype)\n', (29371, 29398), True, 'import numpy as np\n'), ((30152, 30183), 'numpy.array', 'np.array', (['[64]'], {'dtype': 'int_dtype'}), '([64], dtype=int_dtype)\n', (30160, 30183), True, 'import numpy as np\n'), ((31423, 31469), 'numpy.array', 'np.array', (['[0.1, 1.5, 2.0, 2.5]'], {'dtype': 'fp_dtype'}), '([0.1, 1.5, 2.0, 2.5], dtype=fp_dtype)\n', (31431, 31469), True, 'import numpy as np\n'), ((31488, 31526), 'numpy.array', 'np.array', (['[2, 3, 3, 4]'], {'dtype': 'fp_dtype'}), '([2, 3, 3, 4], dtype=fp_dtype)\n', (31496, 31526), True, 'import numpy as np\n'), ((32690, 32722), 'ngraph.convert', 'ng.convert', (['body_timestep', '"""f32"""'], {}), "(body_timestep, 'f32')\n", (32700, 32722), True, 'import ngraph as ng\n'), ((32779, 32808), 'ngraph.squeeze', 'ng.squeeze', (['body_data_in', '[0]'], {}), '(body_data_in, [0])\n', (32789, 32808), True, 'import ngraph as ng\n'), ((32902, 32929), 'ngraph.convert', 'ng.convert', (['elem_cnt', '"""f32"""'], {}), "(elem_cnt, 'f32')\n", (32912, 32929), True, 'import ngraph as ng\n'), ((33183, 33217), 'numpy.zeros', 'np.zeros', (['[2, 2]'], {'dtype': 'np.float32'}), '([2, 2], dtype=np.float32)\n', (33191, 33217), True, 'import numpy as np\n'), ((33267, 33279), 'numpy.int32', 'np.int32', (['(16)'], {}), '(16)\n', (33275, 33279), True, 'import numpy as np\n'), ((33281, 33292), 'numpy.int32', 'np.int32', (['(1)'], {}), '(1)\n', (33289, 33292), True, 'import numpy as np\n'), ((33585, 33635), 'ngraph.utils.tensor_iterator_types.TensorIteratorSliceInputDesc', 'TensorIteratorSliceInputDesc', (['(0)', '(0)', '(0)', '(1)', '(1)', '(-1)', '(0)'], {}), '(0, 0, 0, 1, 1, -1, 0)\n', (33613, 33635), False, 'from ngraph.utils.tensor_iterator_types import GraphBody, 
TensorIteratorSliceInputDesc, TensorIteratorMergedInputDesc, TensorIteratorInvariantInputDesc, TensorIteratorBodyOutputDesc, TensorIteratorConcatOutputDesc\n'), ((33660, 33710), 'ngraph.utils.tensor_iterator_types.TensorIteratorSliceInputDesc', 'TensorIteratorSliceInputDesc', (['(1)', '(1)', '(0)', '(1)', '(1)', '(-1)', '(0)'], {}), '(1, 1, 0, 1, 1, -1, 0)\n', (33688, 33710), False, 'from ngraph.utils.tensor_iterator_types import GraphBody, TensorIteratorSliceInputDesc, TensorIteratorMergedInputDesc, TensorIteratorInvariantInputDesc, TensorIteratorBodyOutputDesc, TensorIteratorConcatOutputDesc\n'), ((33784, 33822), 'ngraph.utils.tensor_iterator_types.TensorIteratorMergedInputDesc', 'TensorIteratorMergedInputDesc', (['(2)', '(2)', '(0)'], {}), '(2, 2, 0)\n', (33813, 33822), False, 'from ngraph.utils.tensor_iterator_types import GraphBody, TensorIteratorSliceInputDesc, TensorIteratorMergedInputDesc, TensorIteratorInvariantInputDesc, TensorIteratorBodyOutputDesc, TensorIteratorConcatOutputDesc\n'), ((33895, 33933), 'ngraph.utils.tensor_iterator_types.TensorIteratorInvariantInputDesc', 'TensorIteratorInvariantInputDesc', (['(3)', '(3)'], {}), '(3, 3)\n', (33927, 33933), False, 'from ngraph.utils.tensor_iterator_types import GraphBody, TensorIteratorSliceInputDesc, TensorIteratorMergedInputDesc, TensorIteratorInvariantInputDesc, TensorIteratorBodyOutputDesc, TensorIteratorConcatOutputDesc\n'), ((34019, 34057), 'ngraph.utils.tensor_iterator_types.TensorIteratorBodyOutputDesc', 'TensorIteratorBodyOutputDesc', (['(0)', '(0)', '(-1)'], {}), '(0, 0, -1)\n', (34047, 34057), False, 'from ngraph.utils.tensor_iterator_types import GraphBody, TensorIteratorSliceInputDesc, TensorIteratorMergedInputDesc, TensorIteratorInvariantInputDesc, TensorIteratorBodyOutputDesc, TensorIteratorConcatOutputDesc\n'), ((34128, 34180), 'ngraph.utils.tensor_iterator_types.TensorIteratorConcatOutputDesc', 'TensorIteratorConcatOutputDesc', (['(1)', '(1)', '(0)', '(1)', '(1)', '(-1)', '(0)'], {}), '(1, 
1, 0, 1, 1, -1, 0)\n', (34158, 34180), False, 'from ngraph.utils.tensor_iterator_types import GraphBody, TensorIteratorSliceInputDesc, TensorIteratorMergedInputDesc, TensorIteratorInvariantInputDesc, TensorIteratorBodyOutputDesc, TensorIteratorConcatOutputDesc\n')] |
# NAME: <NAME>
# template matching
# import all the required libraries packages
import cv2
import numpy as np
import argparse
import json
import os
from timeit import default_timer as timer
from skimage.io import imread_collection
# construct the argument parser and parse the arguments
#ap = argparse.ArgumentParser()
#ap.add_argument("-t", "--template", required=True, help="Path to template image")
#ap.add_argument("-i", "--images", required=True, help="Path to images where template will be matched")
#args = vars(ap.parse_args())
# Folder containing the template (object) images to search for.
TEMPLATE = "template"
# Folder containing the scene/query images in which templates are searched.
TEMPLATE_SCENE = "template_scene"
# key point & descriptor function
def kp_des(coll_query, coll_train):
    """Compute SIFT keypoints and descriptors for every query and template image.

    Results are appended to the module-level ``kp_des_query`` and
    ``kp_des_train`` lists as ``(keypoints, descriptors)`` tuples, one entry
    per image, and those two lists are returned.
    """
    print('******Running KP_DES******')
    # Query/scene images are fed to the detector as loaded.
    for query_img in coll_query:
        kp_des_query.append(detector.detectAndCompute(query_img, None))
    # Template images are converted to grayscale first.
    for train_img in coll_train:
        gray_train = cv2.cvtColor(train_img, cv2.COLOR_BGR2GRAY)
        kp_des_train.append(detector.detectAndCompute(gray_train, None))
    print('**********KP_DES************')
    return (kp_des_query, kp_des_train)
# define function for finding key matches
def find_matches(des_query, des_train, kp1, kp2):
    """Match two SIFT descriptor sets with a FLANN-based k-NN matcher.

    Parameters
    ----------
    des_query, des_train : descriptor arrays for the query and template image.
    kp1, kp2 : the corresponding keypoint lists (used only to verify that
        each image has at least the 2 keypoints needed for k=2 matching).

    Returns
    -------
    (key_matches, matches) : number of matches passing Lowe's 0.7 ratio
        test, and the raw knnMatch result (an empty list when either image
        has fewer than 2 keypoints).
    """
    start1 = timer()
    key_matches = 0
    # BUGFIX: matches must exist even when the k-NN branch is skipped;
    # previously it was unbound and the return raised a NameError.
    matches = []
    # FLANN parameters
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=10)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    # knnMatch with k=2 requires at least two keypoints on each side.
    if len(kp1) >= 2 and len(kp2) >= 2:
        matches = flann.knnMatch(des_query, des_train, k=2)
        # Lowe's ratio test: keep a match only when the best candidate is
        # clearly better than the second best.
        for m, n in matches:
            if m.distance < 0.7 * n.distance:
                key_matches = key_matches + 1
    print('key_matches: ', key_matches)
    end1 = timer()
    print('find_match_time: ', (end1 - start1))
    return (key_matches, matches)
# match query image and template image function
def temp_query_match(coll_train, coll_query, kp_des_train, kp_des_query, query_name, train_name):
    """Match every template against every query image and record the results.

    For each (template, query) pair the precomputed SIFT keypoints and
    descriptors are matched; when enough good matches survive the ratio
    test, a homography is estimated and the projected corner box of the
    template in the query image is stored in the module-level ``dicto``
    under the query image name. Templates with no acceptable match are
    recorded under the ``'na'`` key. Returns ``dicto``.
    """
    print('******inside temp_query_match******')
    # run a loop through template images
    for i, template in enumerate(coll_train):
        print('------------------------------')
        print(train_name[i])
        print('------------------------------')
        if (train_name[i] == 'INSERT IMAGE NAMES THAT DOES NOT HAVE MANY FEATURES OR TOO SMALL'):  # because this image data is causing problem, so skip it
            dicto['na'].append((train_name[i], []))
            continue
        # grayscale template (its shape is used for the corner box below)
        trainImg = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
        # run a loop through query images
        for j, imagePath in enumerate(coll_query):
            print('************************')
            print(query_name[j])
            print('************************')
            QueryImgBGR = imagePath
            key_matches, matches = find_matches(kp_des_query[j][1], kp_des_train[i][1], kp_des_query[j][0], kp_des_train[i][0])
            # BUGFIX: find_matches returns a (possibly empty) list, never the
            # integer 0, so the old `matches == 0` test could never trigger.
            if not matches:
                dicto['na'].append((train_name[i], []))
                continue
            # add image path to dictionary as key (setdefault is a no-op
            # when the key already exists)
            dicto.setdefault(query_name[j], [])
            # to check for major matches
            if key_matches > 70:
                # compute matches with distance less than 0.55 of the runner-up
                goodMatch = []
                for m, n in matches:
                    if (m.distance < 0.55 * n.distance):
                        goodMatch.append(m)
                # check if no. of matches is greater than your initialization and get template & query img keypts
                if (len(goodMatch) > MIN_MATCH_COUNT):
                    tp = []
                    qp = []
                    for m in goodMatch:
                        tp.append(kp_des_train[i][0][m.trainIdx].pt)
                        qp.append(kp_des_query[j][0][m.queryIdx].pt)
                    tp, qp = np.float32((tp, qp))
                    H, status = cv2.findHomography(tp, qp, cv2.RANSAC, 3.0)
                    # get the coordinates of corner pts and add it to dictionary
                    h, w = trainImg.shape
                    trainBorder = np.float32([[[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]])
                    if H is not None:
                        queryBorder = cv2.perspectiveTransform(trainBorder, H)
                        cv2.polylines(QueryImgBGR, [np.int32(queryBorder)], True, (0, 255, 0), 2)
                        print('queryborder: ', queryBorder)
                        print("Object found- %d/%d" % (len(goodMatch), MIN_MATCH_COUNT))
                        print(trainBorder)
                        dicto[query_name[j]].append(tuple((train_name[i], [int(queryBorder[0][0][0]), int(queryBorder[0][0][-1]), int(queryBorder[0][2][0]), int(queryBorder[0][2][-1])])))
                        print(dicto)
                    # stop scanning further query images for this template
                    break
                else:
                    print("Not Enough match found- %d/%d" % (len(goodMatch), MIN_MATCH_COUNT))
            # if no template has found match, add it to 'na' key in dictionary
            else:
                dicto['na'].append(tuple((train_name[i], [])))
    print(dicto)
    return (dicto)
# get names function
def load_images_from_folder(folder):
    """Return the list of file names found directly inside *folder*."""
    return list(os.listdir(folder))
# function main
def main():
    """Run the full template-matching pipeline and write results to data.json."""
    # file names of templates and scene images
    train_name = load_images_from_folder(TEMPLATE)
    query_name = load_images_from_folder(TEMPLATE_SCENE)
    # load every PNG in each folder as an image collection
    coll_train = imread_collection(TEMPLATE + "/*.png")
    coll_query = imread_collection(TEMPLATE_SCENE + "/*.png")
    # compute keypoints/descriptors, then match templates against scenes
    kp_des_query, kp_des_train = kp_des(coll_query, coll_train)
    result = temp_query_match(coll_train, coll_query, kp_des_train, kp_des_query, query_name, train_name)
    # persist the match dictionary as JSON
    with open('data.json', 'w') as file:
        json.dump(result, file, ensure_ascii=False, indent=4)
# SIFT detector shared (as a module-level global) by kp_des().
detector=cv2.xfeatures2d.SIFT_create()
# initialize globals used by the matching functions
# minimum number of good matches required to accept a detection
MIN_MATCH_COUNT=60
# results dictionary: query image name -> list of (template name, corner box);
# the 'na' key collects templates that found no match
dicto = {}
dicto.setdefault('na',[])
# caches of (keypoints, descriptors) tuples filled in by kp_des()
kp_des_query = []
kp_des_train = []
# NOTE(review): these two are shadowed by locals in main() and appear unused
coll_train = []
coll_query = []
main()
| [
"json.dump",
"cv2.cvtColor",
"timeit.default_timer",
"numpy.float32",
"cv2.FlannBasedMatcher",
"numpy.int32",
"cv2.xfeatures2d.SIFT_create",
"cv2.perspectiveTransform",
"skimage.io.imread_collection",
"cv2.findHomography",
"os.listdir"
] | [((6757, 6786), 'cv2.xfeatures2d.SIFT_create', 'cv2.xfeatures2d.SIFT_create', ([], {}), '()\n', (6784, 6786), False, 'import cv2\n'), ((1445, 1452), 'timeit.default_timer', 'timer', ([], {}), '()\n', (1450, 1452), True, 'from timeit import default_timer as timer\n'), ((1644, 1694), 'cv2.FlannBasedMatcher', 'cv2.FlannBasedMatcher', (['index_params', 'search_params'], {}), '(index_params, search_params)\n', (1665, 1694), False, 'import cv2\n'), ((2139, 2146), 'timeit.default_timer', 'timer', ([], {}), '()\n', (2144, 2146), True, 'from timeit import default_timer as timer\n'), ((5927, 5945), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (5937, 5945), False, 'import os\n'), ((6336, 6368), 'skimage.io.imread_collection', 'imread_collection', (['col_dir_train'], {}), '(col_dir_train)\n', (6353, 6368), False, 'from skimage.io import imread_collection\n'), ((6386, 6418), 'skimage.io.imread_collection', 'imread_collection', (['col_dir_query'], {}), '(col_dir_query)\n', (6403, 6418), False, 'from skimage.io import imread_collection\n'), ((1026, 1063), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1038, 1063), False, 'import cv2\n'), ((2991, 3033), 'cv2.cvtColor', 'cv2.cvtColor', (['template', 'cv2.COLOR_BGR2GRAY'], {}), '(template, cv2.COLOR_BGR2GRAY)\n', (3003, 3033), False, 'import cv2\n'), ((6677, 6729), 'json.dump', 'json.dump', (['dicto', 'file'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(dicto, file, ensure_ascii=False, indent=4)\n', (6686, 6729), False, 'import json\n'), ((4555, 4575), 'numpy.float32', 'np.float32', (['(tp, qp)'], {}), '((tp, qp))\n', (4565, 4575), True, 'import numpy as np\n'), ((4604, 4647), 'cv2.findHomography', 'cv2.findHomography', (['tp', 'qp', 'cv2.RANSAC', '(3.0)'], {}), '(tp, qp, cv2.RANSAC, 3.0)\n', (4622, 4647), False, 'import cv2\n'), ((4818, 4880), 'numpy.float32', 'np.float32', (['[[[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]]'], {}), '([[[0, 0], [0, h - 
1], [w - 1, h - 1], [w - 1, 0]]])\n', (4828, 4880), True, 'import numpy as np\n'), ((4941, 4981), 'cv2.perspectiveTransform', 'cv2.perspectiveTransform', (['trainBorder', 'H'], {}), '(trainBorder, H)\n', (4965, 4981), False, 'import cv2\n'), ((5032, 5053), 'numpy.int32', 'np.int32', (['queryBorder'], {}), '(queryBorder)\n', (5040, 5053), True, 'import numpy as np\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides a container class to store parameters for the
geometry of an ellipse.
"""
import math
from astropy import log
import numpy as np
__all__ = ['EllipseGeometry']
# 16x16 masks used by EllipseGeometry.find_center. They are passed as the
# ``mask`` argument of np.ma.masked_array, where 1 means "masked out" and
# 0 means "kept". IN_MASK keeps only a small central disk of the window;
# OUT_MASK keeps only a thin ring at the window border.
IN_MASK = [
    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
]
OUT_MASK = [
    [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
    [1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1],
    [1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1],
    [1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1],
    [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
    [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
    [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
    [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
    [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
    [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
    [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
    [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
    [1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1],
    [1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1],
    [1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1],
    [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
]
def _area(sma, eps, phi, r):
"""
Compute elliptical sector area.
"""
aux = r * math.cos(phi) / sma
signal = aux / abs(aux)
if abs(aux) >= 1.:
aux = signal
return abs(sma**2 * (1.-eps) / 2. * math.acos(aux))
class EllipseGeometry:
"""
Container class to store parameters for the geometry of an ellipse.
Parameters that describe the relationship of a given ellipse with
other associated ellipses are also encapsulated in this container.
These associated ellipses may include, e.g., the two (inner and
outer) bounding ellipses that are used to build sectors along the
elliptical path. These sectors are used as areas for integrating
pixel values, when the area integration mode (mean or median) is
used.
This class also keeps track of where in the ellipse we are when
performing an 'extract' operation. This is mostly relevant when
using an area integration mode (as opposed to a pixel integration
mode)
Parameters
----------
x0, y0 : float
The center pixel coordinate of the ellipse.
sma : float
The semimajor axis of the ellipse in pixels.
eps : ellipticity
The ellipticity of the ellipse.
pa : float
The position angle (in radians) of the semimajor axis in
relation to the postive x axis of the image array (rotating
towards the positive y axis). Position angles are defined in the
range :math:`0 < PA <= \\pi`. Avoid using as starting position
angle of 0., since the fit algorithm may not work properly. When
the ellipses are such that position angles are near either
extreme of the range, noise can make the solution jump back and
forth between successive isophotes, by amounts close to 180
degrees.
astep : float, optional
The step value for growing/shrinking the semimajor axis. It can
be expressed either in pixels (when ``linear_growth=True``) or
as a relative value (when ``linear_growth=False``). The default
is 0.1.
linear_growth : bool, optional
The semimajor axis growing/shrinking mode. The default is
`False`.
fix_center : bool, optional
Keep center of ellipse fixed during fit? The default is False.
fix_pa : bool, optional
Keep position angle of semi-major axis of ellipse fixed during fit?
The default is False.
fix_eps : bool, optional
Keep ellipticity of ellipse fixed during fit? The default is False.
"""
def __init__(self, x0, y0, sma, eps, pa, astep=0.1, linear_growth=False,
fix_center=False, fix_pa=False, fix_eps=False):
self.x0 = x0
self.y0 = y0
self.sma = sma
self.eps = eps
self.pa = pa
self.astep = astep
self.linear_growth = linear_growth
# Fixed parameters are flagged in here. Note that the
# ordering must follow the same ordering used in the
# fitter._CORRECTORS list.
self.fix = np.array([fix_center, fix_center, fix_pa, fix_eps])
# limits for sector angular width
self._phi_min = 0.05
self._phi_max = 0.2
# variables used in the calculation of the sector angular width
sma1, sma2 = self.bounding_ellipses()
inner_sma = min((sma2 - sma1), 3.)
self._area_factor = (sma2 - sma1) * inner_sma
# sma can eventually be zero!
if self.sma > 0.:
self.sector_angular_width = max(min((inner_sma / self.sma),
self._phi_max), self._phi_min)
self.initial_polar_angle = self.sector_angular_width / 2.
self.initial_polar_radius = self.radius(self.initial_polar_angle)
def find_center(self, image, threshold=0.1, verbose=True):
"""
Find the center of a galaxy.
If the algorithm is successful the (x, y) coordinates in this
`~photutils.isophote.EllipseGeometry` (i.e., the ``x0`` and
``y0`` attributes) instance will be modified.
The isophote fit algorithm requires an initial guess for the
galaxy center (x, y) coordinates and these coordinates must be
close to the actual galaxy center for the isophote fit to work.
This method provides can provide an initial guess for the galaxy
center coordinates. See the **Notes** section below for more
details.
Parameters
----------
image : 2D `~numpy.ndarray`
The image array. Masked arrays are not recognized here. This
assumes that centering should always be done on valid pixels.
threshold : float, optional
The centerer threshold. To turn off the centerer, set this
to a large value (i.e., >> 1). The default is 0.1.
verbose : bool, optional
Whether to print object centering information. The default is
`True`.
Notes
-----
The centerer function scans a 10x10 window centered on the (x,
y) coordinates in the `~photutils.isophote.EllipseGeometry`
instance passed to the constructor of the
`~photutils.isophote.Ellipse` class. If any of the
`~photutils.isophote.EllipseGeometry` (x, y) coordinates are
`None`, the center of the input image frame is used. If the
center acquisition is successful, the
`~photutils.isophote.EllipseGeometry` instance is modified in
place to reflect the solution of the object centerer algorithm.
In some cases the object centerer algorithm may fail even though
there is enough signal-to-noise to start a fit (e.g., objects
with very high ellipticity). In those cases the sensitivity
of the algorithm can be decreased by decreasing the value of
the object centerer threshold parameter. The centerer works by
looking where a quantity akin to a signal-to-noise ratio is
maximized within the 10x10 window. The centerer can thus be shut
off entirely by setting the threshold to a large value (i.e.,
>> 1; meaning no location inside the search window will achieve
that signal-to-noise ratio).
"""
self._centerer_mask_half_size = len(IN_MASK) / 2
self.centerer_threshold = threshold
# number of pixels in each mask
sz = len(IN_MASK)
self._centerer_ones_in = np.ma.masked_array(np.ones(shape=(sz, sz)),
mask=IN_MASK)
self._centerer_ones_out = np.ma.masked_array(np.ones(shape=(sz, sz)),
mask=OUT_MASK)
self._centerer_in_mask_npix = np.sum(self._centerer_ones_in)
self._centerer_out_mask_npix = np.sum(self._centerer_ones_out)
# Check if center coordinates point to somewhere inside the frame.
# If not, set then to frame center.
shape = image.shape
_x0 = self.x0
_y0 = self.y0
if (_x0 is None or _x0 < 0 or _x0 >= shape[1] or _y0 is None or
_y0 < 0 or _y0 >= shape[0]):
_x0 = shape[1] / 2
_y0 = shape[0] / 2
max_fom = 0.
max_i = 0
max_j = 0
# scan all positions inside window
window_half_size = 5
for i in range(int(_x0 - window_half_size),
int(_x0 + window_half_size) + 1):
for j in range(int(_y0 - window_half_size),
int(_y0 + window_half_size) + 1):
# ensure that it stays inside image frame
i1 = int(max(0, i - self._centerer_mask_half_size))
j1 = int(max(0, j - self._centerer_mask_half_size))
i2 = int(min(shape[1] - 1, i + self._centerer_mask_half_size))
j2 = int(min(shape[0] - 1, j + self._centerer_mask_half_size))
window = image[j1:j2, i1:i2]
# averages in inner and outer regions.
inner = np.ma.masked_array(window, mask=IN_MASK)
outer = np.ma.masked_array(window, mask=OUT_MASK)
inner_avg = np.sum(inner) / self._centerer_in_mask_npix
outer_avg = np.sum(outer) / self._centerer_out_mask_npix
# standard deviation and figure of merit
inner_std = np.std(inner)
outer_std = np.std(outer)
stddev = np.sqrt(inner_std**2 + outer_std**2)
fom = (inner_avg - outer_avg) / stddev
if fom > max_fom:
max_fom = fom
max_i = i
max_j = j
# figure of merit > threshold: update geometry with new coordinates.
if max_fom > threshold:
self.x0 = float(max_i)
self.y0 = float(max_j)
if verbose:
log.info(f'Found center at x0 = {self.x0:5.1f}, '
'y0 = {self.y0:5.1f}')
else:
if verbose:
log.info('Result is below the threshold -- keeping the '
'original coordinates.')
def radius(self, angle):
"""
Calculate the polar radius for a given polar angle.
Parameters
----------
angle : float
The polar angle (radians).
Returns
-------
radius : float
The polar radius (pixels).
"""
return (self.sma * (1. - self.eps) /
np.sqrt(((1. - self.eps) * np.cos(angle))**2 +
(np.sin(angle))**2))
    def initialize_sector_geometry(self, phi):
        """
        Initialize geometry attributes associated with an elliptical
        sector at the given polar angle ``phi``.

        This function computes:

        * the four vertices that define the elliptical sector on the
          pixel array.
        * the sector area (saved in the ``sector_area`` attribute)
        * the sector angular width (saved in ``sector_angular_width``
          attribute)

        Parameters
        ----------
        phi : float
            The polar angle (radians) where the sector is located.

        Returns
        -------
        x, y : 1D `~numpy.ndarray`
            The x and y coordinates of each vertex as 1D arrays.
        """
        # These polar radii bound the region between the inner
        # and outer ellipses that define the sector.
        sma1, sma2 = self.bounding_ellipses()
        eps_ = 1. - self.eps
        # polar vector at one side of the elliptical sector
        # NOTE: _phi1/_phi2 use the *current* sector_angular_width; that
        # attribute is only updated further below, after the radii are set.
        self._phi1 = phi - self.sector_angular_width / 2.
        # r1/r2: radii of the inner and outer bounding ellipses at _phi1
        r1 = (sma1 * eps_ / math.sqrt((eps_ * math.cos(self._phi1))**2
                                      + (math.sin(self._phi1))**2))
        r2 = (sma2 * eps_ / math.sqrt((eps_ * math.cos(self._phi1))**2
                                      + (math.sin(self._phi1))**2))
        # polar vector at the other side of the elliptical sector
        self._phi2 = phi + self.sector_angular_width / 2.
        # r3/r4: radii of the outer and inner bounding ellipses at _phi2
        r3 = (sma2 * eps_ / math.sqrt((eps_ * math.cos(self._phi2))**2
                                      + (math.sin(self._phi2))**2))
        r4 = (sma1 * eps_ / math.sqrt((eps_ * math.cos(self._phi2))**2
                                      + (math.sin(self._phi2))**2))
        # sector area: difference of the four elliptical-sector areas
        sa1 = _area(sma1, self.eps, self._phi1, r1)
        sa2 = _area(sma2, self.eps, self._phi1, r2)
        sa3 = _area(sma2, self.eps, self._phi2, r3)
        sa4 = _area(sma1, self.eps, self._phi2, r4)
        self.sector_area = abs((sa3 - sa2) - (sa4 - sa1))
        # angular width of sector. It is calculated such that the sectors
        # come out with roughly constant area along the ellipse.
        self.sector_angular_width = max(min((self._area_factor / (r3 - r4) /
                                             r4), self._phi_max),
                                        self._phi_min)
        # compute the 4 vertices that define the elliptical sector.
        vertex_x = np.zeros(shape=4, dtype=float)
        vertex_y = np.zeros(shape=4, dtype=float)
        # vertices are labelled in counterclockwise sequence; the position
        # angle rotates the sector into image coordinates
        vertex_x[0:2] = np.array([r1, r2]) * math.cos(self._phi1 + self.pa)
        vertex_x[2:4] = np.array([r4, r3]) * math.cos(self._phi2 + self.pa)
        vertex_y[0:2] = np.array([r1, r2]) * math.sin(self._phi1 + self.pa)
        vertex_y[2:4] = np.array([r4, r3]) * math.sin(self._phi2 + self.pa)
        # translate to the ellipse center
        vertex_x += self.x0
        vertex_y += self.y0
        return vertex_x, vertex_y
def bounding_ellipses(self):
"""
Compute the semimajor axis of the two ellipses that bound the
annulus where integrations take place.
Returns
-------
sma1, sma2 : float
The smaller and larger values of semimajor axis length that
define the annulus bounding ellipses.
"""
if self.linear_growth:
a1 = self.sma - self.astep / 2.
a2 = self.sma + self.astep / 2.
else:
a1 = self.sma * (1. - self.astep / 2.)
a2 = self.sma * (1. + self.astep / 2.)
return a1, a2
def polar_angle_sector_limits(self):
"""
Return the two polar angles that bound the sector.
The two bounding polar angles become available only after
calling the
:meth:`~photutils.isophote.EllipseGeometry.initialize_sector_geometry`
method.
Returns
-------
phi1, phi2 : float
The smaller and larger values of polar angle that bound the
current sector.
"""
return self._phi1, self._phi2
def to_polar(self, x, y):
"""
Return the radius and polar angle in the ellipse coordinate
system given (x, y) pixel image coordinates.
This function takes care of the different definitions for
position angle (PA) and polar angle (phi):
.. math::
-\\pi < PA < \\pi
0 < phi < 2 \\pi
Note that radius can be anything. The solution is not tied to
the semimajor axis length, but to the center position and tilt
angle.
Parameters
----------
x, y : float
The (x, y) image coordinates.
Returns
-------
radius, angle : float
The ellipse radius and polar angle.
"""
# We split in between a scalar version and a
# vectorized version. This is necessary for
# now so we don't pay a heavy speed penalty
# that is incurred when using vectorized code.
# The split in two separate functions helps in
# the profiling analysis: most of the time is
# spent in the scalar function.
if isinstance(x, (int, float)):
return self._to_polar_scalar(x, y)
else:
return self._to_polar_vectorized(x, y)
def _to_polar_scalar(self, x, y):
x1 = x - self.x0
y1 = y - self.y0
radius = x1**2 + y1**2
if radius > 0.0:
radius = math.sqrt(radius)
angle = math.asin(abs(y1) / radius)
else:
radius = 0.
angle = 1.
if x1 >= 0. and y1 < 0.:
angle = 2*np.pi - angle
elif x1 < 0. and y1 >= 0.:
angle = np.pi - angle
elif x1 < 0. and y1 < 0.:
angle = np.pi + angle
pa1 = self.pa
if self.pa < 0.:
pa1 = self.pa + 2*np.pi
angle = angle - pa1
if angle < 0.:
angle = angle + 2*np.pi
return radius, angle
def _to_polar_vectorized(self, x, y):
x1 = np.atleast_2d(x) - self.x0
y1 = np.atleast_2d(y) - self.y0
radius = x1**2 + y1**2
angle = np.ones(radius.shape)
imask = (radius > 0.0)
radius[imask] = np.sqrt(radius[imask])
angle[imask] = np.arcsin(np.abs(y1[imask]) / radius[imask])
radius[~imask] = 0.
angle[~imask] = 1.
idx = (x1 >= 0.) & (y1 < 0)
angle[idx] = 2*np.pi - angle[idx]
idx = (x1 < 0.) & (y1 >= 0.)
angle[idx] = np.pi - angle[idx]
idx = (x1 < 0.) & (y1 < 0.)
angle[idx] = np.pi + angle[idx]
pa1 = self.pa
if self.pa < 0.:
pa1 = self.pa + 2*np.pi
angle = angle - pa1
angle[angle < 0] += 2*np.pi
return radius, angle
def update_sma(self, step):
"""
Calculate an updated value for the semimajor axis, given the
current value and the step value.
The step value must be managed by the caller to support both
modes: grow outwards and shrink inwards.
Parameters
----------
step : float
The step value.
Returns
-------
sma : float
The new semimajor axis length.
"""
if self.linear_growth:
sma = self.sma + step
else:
sma = self.sma * (1. + step)
return sma
def reset_sma(self, step):
"""
Change the direction of semimajor axis growth, from outwards to
inwards.
Parameters
----------
step : float
The current step value.
Returns
-------
sma, new_step : float
The new semimajor axis length and the new step value to
initiate the shrinking of the semimajor axis length. This is
the step value that should be used when calling the
:meth:`~photutils.isophote.EllipseGeometry.update_sma`
method.
"""
if self.linear_growth:
sma = self.sma - step
step = -step
else:
aux = 1. / (1. + step)
sma = self.sma * aux
step = aux - 1.
return sma, step
| [
"numpy.atleast_2d",
"numpy.sum",
"numpy.abs",
"math.sqrt",
"numpy.std",
"numpy.zeros",
"numpy.ones",
"math.sin",
"astropy.log.info",
"math.acos",
"numpy.sin",
"numpy.array",
"math.cos",
"numpy.ma.masked_array",
"numpy.cos",
"numpy.sqrt"
] | [((5058, 5109), 'numpy.array', 'np.array', (['[fix_center, fix_center, fix_pa, fix_eps]'], {}), '([fix_center, fix_center, fix_pa, fix_eps])\n', (5066, 5109), True, 'import numpy as np\n'), ((8776, 8806), 'numpy.sum', 'np.sum', (['self._centerer_ones_in'], {}), '(self._centerer_ones_in)\n', (8782, 8806), True, 'import numpy as np\n'), ((8846, 8877), 'numpy.sum', 'np.sum', (['self._centerer_ones_out'], {}), '(self._centerer_ones_out)\n', (8852, 8877), True, 'import numpy as np\n'), ((14110, 14140), 'numpy.zeros', 'np.zeros', ([], {'shape': '(4)', 'dtype': 'float'}), '(shape=4, dtype=float)\n', (14118, 14140), True, 'import numpy as np\n'), ((14160, 14190), 'numpy.zeros', 'np.zeros', ([], {'shape': '(4)', 'dtype': 'float'}), '(shape=4, dtype=float)\n', (14168, 14190), True, 'import numpy as np\n'), ((17904, 17925), 'numpy.ones', 'np.ones', (['radius.shape'], {}), '(radius.shape)\n', (17911, 17925), True, 'import numpy as np\n'), ((17982, 18004), 'numpy.sqrt', 'np.sqrt', (['radius[imask]'], {}), '(radius[imask])\n', (17989, 18004), True, 'import numpy as np\n'), ((2109, 2122), 'math.cos', 'math.cos', (['phi'], {}), '(phi)\n', (2117, 2122), False, 'import math\n'), ((2241, 2255), 'math.acos', 'math.acos', (['aux'], {}), '(aux)\n', (2250, 2255), False, 'import math\n'), ((8501, 8524), 'numpy.ones', 'np.ones', ([], {'shape': '(sz, sz)'}), '(shape=(sz, sz))\n', (8508, 8524), True, 'import numpy as np\n'), ((8645, 8668), 'numpy.ones', 'np.ones', ([], {'shape': '(sz, sz)'}), '(shape=(sz, sz))\n', (8652, 8668), True, 'import numpy as np\n'), ((14277, 14295), 'numpy.array', 'np.array', (['[r1, r2]'], {}), '([r1, r2])\n', (14285, 14295), True, 'import numpy as np\n'), ((14298, 14328), 'math.cos', 'math.cos', (['(self._phi1 + self.pa)'], {}), '(self._phi1 + self.pa)\n', (14306, 14328), False, 'import math\n'), ((14353, 14371), 'numpy.array', 'np.array', (['[r4, r3]'], {}), '([r4, r3])\n', (14361, 14371), True, 'import numpy as np\n'), ((14374, 14404), 'math.cos', 
'math.cos', (['(self._phi2 + self.pa)'], {}), '(self._phi2 + self.pa)\n', (14382, 14404), False, 'import math\n'), ((14429, 14447), 'numpy.array', 'np.array', (['[r1, r2]'], {}), '([r1, r2])\n', (14437, 14447), True, 'import numpy as np\n'), ((14450, 14480), 'math.sin', 'math.sin', (['(self._phi1 + self.pa)'], {}), '(self._phi1 + self.pa)\n', (14458, 14480), False, 'import math\n'), ((14505, 14523), 'numpy.array', 'np.array', (['[r4, r3]'], {}), '([r4, r3])\n', (14513, 14523), True, 'import numpy as np\n'), ((14526, 14556), 'math.sin', 'math.sin', (['(self._phi2 + self.pa)'], {}), '(self._phi2 + self.pa)\n', (14534, 14556), False, 'import math\n'), ((17197, 17214), 'math.sqrt', 'math.sqrt', (['radius'], {}), '(radius)\n', (17206, 17214), False, 'import math\n'), ((17789, 17805), 'numpy.atleast_2d', 'np.atleast_2d', (['x'], {}), '(x)\n', (17802, 17805), True, 'import numpy as np\n'), ((17829, 17845), 'numpy.atleast_2d', 'np.atleast_2d', (['y'], {}), '(y)\n', (17842, 17845), True, 'import numpy as np\n'), ((10085, 10125), 'numpy.ma.masked_array', 'np.ma.masked_array', (['window'], {'mask': 'IN_MASK'}), '(window, mask=IN_MASK)\n', (10103, 10125), True, 'import numpy as np\n'), ((10150, 10191), 'numpy.ma.masked_array', 'np.ma.masked_array', (['window'], {'mask': 'OUT_MASK'}), '(window, mask=OUT_MASK)\n', (10168, 10191), True, 'import numpy as np\n'), ((10423, 10436), 'numpy.std', 'np.std', (['inner'], {}), '(inner)\n', (10429, 10436), True, 'import numpy as np\n'), ((10465, 10478), 'numpy.std', 'np.std', (['outer'], {}), '(outer)\n', (10471, 10478), True, 'import numpy as np\n'), ((10504, 10544), 'numpy.sqrt', 'np.sqrt', (['(inner_std ** 2 + outer_std ** 2)'], {}), '(inner_std ** 2 + outer_std ** 2)\n', (10511, 10544), True, 'import numpy as np\n'), ((10947, 11018), 'astropy.log.info', 'log.info', (['f"""Found center at x0 = {self.x0:5.1f}, y0 = {{self.y0:5.1f}}"""'], {}), "(f'Found center at x0 = {self.x0:5.1f}, y0 = {{self.y0:5.1f}}')\n", (10955, 11018), False, 'from 
astropy import log\n'), ((11099, 11177), 'astropy.log.info', 'log.info', (['"""Result is below the threshold -- keeping the original coordinates."""'], {}), "('Result is below the threshold -- keeping the original coordinates.')\n", (11107, 11177), False, 'from astropy import log\n'), ((18038, 18055), 'numpy.abs', 'np.abs', (['y1[imask]'], {}), '(y1[imask])\n', (18044, 18055), True, 'import numpy as np\n'), ((10220, 10233), 'numpy.sum', 'np.sum', (['inner'], {}), '(inner)\n', (10226, 10233), True, 'import numpy as np\n'), ((10292, 10305), 'numpy.sum', 'np.sum', (['outer'], {}), '(outer)\n', (10298, 10305), True, 'import numpy as np\n'), ((11649, 11662), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (11655, 11662), True, 'import numpy as np\n'), ((12825, 12845), 'math.sin', 'math.sin', (['self._phi1'], {}), '(self._phi1)\n', (12833, 12845), False, 'import math\n'), ((12964, 12984), 'math.sin', 'math.sin', (['self._phi1'], {}), '(self._phi1)\n', (12972, 12984), False, 'import math\n'), ((13228, 13248), 'math.sin', 'math.sin', (['self._phi2'], {}), '(self._phi2)\n', (13236, 13248), False, 'import math\n'), ((13368, 13388), 'math.sin', 'math.sin', (['self._phi2'], {}), '(self._phi2)\n', (13376, 13388), False, 'import math\n'), ((11604, 11617), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (11610, 11617), True, 'import numpy as np\n'), ((12759, 12779), 'math.cos', 'math.cos', (['self._phi1'], {}), '(self._phi1)\n', (12767, 12779), False, 'import math\n'), ((12898, 12918), 'math.cos', 'math.cos', (['self._phi1'], {}), '(self._phi1)\n', (12906, 12918), False, 'import math\n'), ((13162, 13182), 'math.cos', 'math.cos', (['self._phi2'], {}), '(self._phi2)\n', (13170, 13182), False, 'import math\n'), ((13302, 13322), 'math.cos', 'math.cos', (['self._phi2'], {}), '(self._phi2)\n', (13310, 13322), False, 'import math\n')] |
"""
Functions used for filtering data, or modifying existing filters.
"""
import numpy as np
from latools.helpers.signal import bool_2_indices
from latools.helpers.stat_fns import nominal_values
def threshold(values, threshold):
    """
    Return boolean arrays where a >= and < threshold.

    Parameters
    ----------
    values : array-like
        Array of real values.
    threshold : float
        Threshold value

    Returns
    -------
    (below, above) : tuple or boolean arrays
    """
    # Strip uncertainty information so the comparison runs on plain floats.
    vals = nominal_values(values)
    below = vals < threshold
    above = vals >= threshold
    return (below, above)
# Additional filter functions
def exclude_downhole(filt, threshold=2):
    """
    Exclude all data after the first excluded portion.

    This makes sense for spot measurements where, because
    of the signal mixing inherent in LA-ICPMS, once a
    contaminant is ablated, it will always be present to
    some degree in signals from further down the ablation
    pit.

    Parameters
    ----------
    filt : boolean array
        The filter to modify.
    threshold : int
        Minimum length (in points) of an excluded region for it
        to trigger downhole exclusion.

    Returns
    -------
    filter : boolean array
    """
    cfilt = filt.copy()
    # Start/end indices of each contiguous excluded (False) region.
    inds = bool_2_indices(~filt)
    if inds is None:
        # No excluded regions at all (same guard as in defrag).
        return cfilt
    # Flag excluded regions at least `threshold` points long.
    rem = (np.diff(inds) >= threshold)[:, 0]
    if any(rem):
        if inds[rem].shape[0] > 1:
            limit = inds[rem][1, 0]
        else:
            # BUGFIX: with exactly one qualifying region, `limit` was
            # previously never assigned, raising UnboundLocalError below.
            limit = inds[rem][0, 0]
        cfilt[limit:] = False
    return cfilt
def defrag(filt, threshold=3, mode='include'):
    """
    'Defragment' a filter.

    Parameters
    ----------
    filt : boolean array
        A filter
    threshold : int
        Consecutive values equal to or below this threshold
        length are considered fragments, and will be removed.
    mode : str
        Whether to change False fragments to True ('include')
        or True fragments to False ('exclude')

    Returns
    -------
    defragmented filter : boolean array

    Raises
    ------
    ValueError
        If `mode` is neither 'include' nor 'exclude'.
    """
    if bool_2_indices(filt) is None:
        # Filter is uniform -- nothing to defragment.
        return filt
    if mode == 'include':
        inds = bool_2_indices(~filt) + 1
        rep = True
    elif mode == 'exclude':
        inds = bool_2_indices(filt) + 1
        rep = False
    else:
        # BUGFIX: an unrecognized mode previously fell through and
        # raised a confusing NameError on `inds` below.
        raise ValueError("mode must be 'include' or 'exclude', not {!r}".format(mode))
    # Fragments are regions no longer than `threshold` points.
    rem = (np.diff(inds) <= threshold)[:, 0]
    cfilt = filt.copy()
    if any(rem):
        for lo, hi in inds[rem]:
            cfilt[lo:hi] = rep
    return cfilt
def trim(ind, start=1, end=0):
    """
    Remove points from the start and end of True regions.

    Parameters
    ----------
    ind : boolean array
        Which filter to trim. If True, applies to currently active
        filters.
    start, end : int
        The number of points to remove from the start and end of
        the specified filter.
    """
    # A point survives only if the filter is still True after shifting
    # it forwards by `start` and backwards by `end`.
    shifted_from_start = np.roll(ind, start)
    shifted_from_end = np.roll(ind, -end)
    return shifted_from_start & shifted_from_end
| [
"latools.helpers.stat_fns.nominal_values",
"numpy.diff",
"latools.helpers.signal.bool_2_indices",
"numpy.roll"
] | [((519, 541), 'latools.helpers.stat_fns.nominal_values', 'nominal_values', (['values'], {}), '(values)\n', (533, 541), False, 'from latools.helpers.stat_fns import nominal_values\n'), ((1139, 1160), 'latools.helpers.signal.bool_2_indices', 'bool_2_indices', (['(~filt)'], {}), '(~filt)\n', (1153, 1160), False, 'from latools.helpers.signal import bool_2_indices\n'), ((1853, 1873), 'latools.helpers.signal.bool_2_indices', 'bool_2_indices', (['filt'], {}), '(filt)\n', (1867, 1873), False, 'from latools.helpers.signal import bool_2_indices\n'), ((2623, 2642), 'numpy.roll', 'np.roll', (['ind', 'start'], {}), '(ind, start)\n', (2630, 2642), True, 'import numpy as np\n'), ((2645, 2663), 'numpy.roll', 'np.roll', (['ind', '(-end)'], {}), '(ind, -end)\n', (2652, 2663), True, 'import numpy as np\n'), ((1173, 1186), 'numpy.diff', 'np.diff', (['inds'], {}), '(inds)\n', (1180, 1186), True, 'import numpy as np\n'), ((1945, 1966), 'latools.helpers.signal.bool_2_indices', 'bool_2_indices', (['(~filt)'], {}), '(~filt)\n', (1959, 1966), False, 'from latools.helpers.signal import bool_2_indices\n'), ((2031, 2051), 'latools.helpers.signal.bool_2_indices', 'bool_2_indices', (['filt'], {}), '(filt)\n', (2045, 2051), False, 'from latools.helpers.signal import bool_2_indices\n'), ((2088, 2101), 'numpy.diff', 'np.diff', (['inds'], {}), '(inds)\n', (2095, 2101), True, 'import numpy as np\n')] |
import collections
import sys
import unittest
import example_robot_data
import numpy as np
import crocoddyl
import pinocchio
from crocoddyl.utils import Contact3DDerived, Contact6DDerived
# Switch pinocchio to its legacy numpy.matrix return-type mode, which the
# slicing/stacking in these tests was written against.
pinocchio.switchToNumpyMatrix()
class ContactModelAbstractTestCase(unittest.TestCase):
    """Base fixture comparing a native crocoddyl contact model against a
    Python-derived reimplementation on a random state.

    Subclasses must set the four class attributes below.
    """
    ROBOT_MODEL = None  # pinocchio robot model under test
    ROBOT_STATE = None  # crocoddyl state built from ROBOT_MODEL
    CONTACT = None  # native crocoddyl contact model
    CONTACT_DER = None  # Python reimplementation to validate against
    def setUp(self):
        """Sample a random state and precompute the pinocchio kinematic
        quantities (Jacobians, frame placements and their derivatives,
        evaluated with a zero joint-acceleration vector) that both
        contact models rely on."""
        self.x = self.ROBOT_STATE.rand()
        self.robot_data = self.ROBOT_MODEL.createData()
        self.data = self.CONTACT.createData(self.robot_data)
        self.data_der = self.CONTACT_DER.createData(self.robot_data)
        # Split the state vector into configuration (nq) and velocity (nv).
        nq, nv = self.ROBOT_MODEL.nq, self.ROBOT_MODEL.nv
        pinocchio.forwardKinematics(self.ROBOT_MODEL, self.robot_data, self.x[:nq], self.x[nq:],
                                    pinocchio.utils.zero(nv))
        pinocchio.computeJointJacobians(self.ROBOT_MODEL, self.robot_data)
        pinocchio.updateFramePlacements(self.ROBOT_MODEL, self.robot_data)
        pinocchio.computeForwardKinematicsDerivatives(self.ROBOT_MODEL, self.robot_data, self.x[:nq], self.x[nq:],
                                                      pinocchio.utils.zero(nv))
    def test_nc_dimension(self):
        """Both implementations must report the same contact dimension."""
        self.assertEqual(self.CONTACT.nc, self.CONTACT_DER.nc, "Wrong nc.")
    def test_calc(self):
        """Jc and a0 computed by calc() must agree between implementations."""
        # Run calc for both action models
        self.CONTACT.calc(self.data, self.x)
        self.CONTACT_DER.calc(self.data_der, self.x)
        # Checking the cost value and its residual
        self.assertTrue(np.allclose(self.data.Jc, self.data_der.Jc, atol=1e-9), "Wrong contact Jacobian (Jc).")
        self.assertTrue(np.allclose(self.data.a0, self.data_der.a0, atol=1e-9), "Wrong drift acceleration (a0).")
    def test_calcDiff(self):
        """da0_dx computed by calcDiff() must agree between implementations."""
        # Run calc for both action models
        self.CONTACT.calcDiff(self.data, self.x, True)
        self.CONTACT_DER.calcDiff(self.data_der, self.x, True)
        # Checking the Jacobians of the contact constraint
        self.assertTrue(np.allclose(self.data.da0_dx, self.data_der.da0_dx, atol=1e-9),
                        "Wrong derivatives of the desired contact acceleration (da0_dx).")
class ContactModelMultipleAbstractTestCase(unittest.TestCase):
    """Base fixture checking that ContactModelMultiple aggregates a set of
    individual contact models correctly (stacked Jc/a0/da0_dx).

    Subclasses must set the three class attributes below.
    """
    ROBOT_MODEL = None  # pinocchio robot model under test
    ROBOT_STATE = None  # crocoddyl state built from ROBOT_MODEL
    CONTACTS = None  # OrderedDict mapping contact name -> contact model
    def setUp(self):
        """Build a ContactModelMultiple from CONTACTS, sample a random
        state and precompute the pinocchio kinematic quantities (with a
        zero joint-acceleration vector)."""
        self.x = self.ROBOT_STATE.rand()
        self.robot_data = self.ROBOT_MODEL.createData()
        self.contactSum = crocoddyl.ContactModelMultiple(self.ROBOT_STATE)
        # Per-contact data, keyed by the same names as CONTACTS.
        self.datas = collections.OrderedDict([[name, contact.createData(self.robot_data)]
                                              for name, contact in self.CONTACTS.items()])
        for name, contact in self.CONTACTS.items():
            self.contactSum.addContact(name, contact)
        self.dataSum = self.contactSum.createData(self.robot_data)
        # Split the state vector into configuration (nq) and velocity (nv).
        nq, nv = self.ROBOT_MODEL.nq, self.ROBOT_MODEL.nv
        pinocchio.forwardKinematics(self.ROBOT_MODEL, self.robot_data, self.x[:nq], self.x[nq:],
                                    pinocchio.utils.zero(nv))
        pinocchio.computeJointJacobians(self.ROBOT_MODEL, self.robot_data)
        pinocchio.updateFramePlacements(self.ROBOT_MODEL, self.robot_data)
        pinocchio.computeForwardKinematicsDerivatives(self.ROBOT_MODEL, self.robot_data, self.x[:nq], self.x[nq:],
                                                      pinocchio.utils.zero(nv))
    def test_nc_dimension(self):
        """The aggregate dimension must equal the sum of the parts."""
        nc = sum([contact.nc for contact in self.CONTACTS.values()])
        self.assertEqual(self.contactSum.nc, nc, "Wrong nc.")
    def test_calc(self):
        """Aggregated Jc/a0 must equal the vertically stacked per-contact values."""
        # Run calc for both action models
        for contact, data in zip(self.CONTACTS.values(), self.datas.values()):
            contact.calc(data, self.x)
        self.contactSum.calc(self.dataSum, self.x)
        # Checking the cost value and its residual
        Jc = np.vstack([data.Jc for data in self.datas.values()])
        a0 = np.vstack([data.a0 for data in self.datas.values()])
        self.assertTrue(np.allclose(self.dataSum.Jc, Jc, atol=1e-9), "Wrong contact Jacobian (Jc).")
        self.assertTrue(np.allclose(self.dataSum.a0, a0, atol=1e-9), "Wrong drift acceleration (a0).")
    def test_calcDiff(self):
        """Aggregated da0_dx must equal the vertically stacked per-contact values."""
        # Run calc for both action models
        for contact, data in zip(self.CONTACTS.values(), self.datas.values()):
            contact.calcDiff(data, self.x, True)
        self.contactSum.calcDiff(self.dataSum, self.x, True)
        # Checking the Jacobians of the contact constraint
        da0_dx = np.vstack([data.da0_dx for data in self.datas.values()])
        self.assertTrue(np.allclose(self.dataSum.da0_dx, da0_dx, atol=1e-9),
                        "Wrong derivatives of the desired contact acceleration (da0_dx).")
class Contact3DTest(ContactModelAbstractTestCase):
    """Compare ContactModel3D with its Python prototype on the HyQ robot."""
    ROBOT_MODEL = example_robot_data.loadHyQ().model
    ROBOT_STATE = crocoddyl.StateMultibody(ROBOT_MODEL)
    gains = pinocchio.utils.rand(2)  # random 2-vector of contact gains
    # Random reference translation for the left-front foot frame.
    xref = crocoddyl.FrameTranslation(ROBOT_MODEL.getFrameId('lf_foot'), pinocchio.SE3.Random().translation)
    CONTACT = crocoddyl.ContactModel3D(ROBOT_STATE, xref, gains)
    CONTACT_DER = Contact3DDerived(ROBOT_STATE, xref, gains)
class Contact3DMultipleTest(ContactModelMultipleAbstractTestCase):
    """Check ContactModelMultiple aggregation of two 3D foot contacts on HyQ."""
    ROBOT_MODEL = example_robot_data.loadHyQ().model
    ROBOT_STATE = crocoddyl.StateMultibody(ROBOT_MODEL)
    gains = pinocchio.utils.rand(2)  # random 2-vector of contact gains
    # Two 3D contacts at random reference translations, keyed (and
    # deterministically ordered) by foot-frame name.
    CONTACTS = collections.OrderedDict(
        sorted({
            'lf_foot':
            crocoddyl.ContactModel3D(
                ROBOT_STATE,
                crocoddyl.FrameTranslation(ROBOT_MODEL.getFrameId('lf_foot'),
                                           pinocchio.SE3.Random().translation), gains),
            'rh_foot':
            crocoddyl.ContactModel3D(
                ROBOT_STATE,
                crocoddyl.FrameTranslation(ROBOT_MODEL.getFrameId('rh_foot'),
                                           pinocchio.SE3.Random().translation), gains)
        }.items()))
class Contact6DTest(ContactModelAbstractTestCase):
    """Compare ContactModel6D with its Python prototype on the iCub robot."""
    ROBOT_MODEL = example_robot_data.loadICub().model
    ROBOT_STATE = crocoddyl.StateMultibody(ROBOT_MODEL)
    gains = pinocchio.utils.rand(2)  # random 2-vector of contact gains
    # Random reference placement for the right-sole frame.
    Mref = crocoddyl.FramePlacement(ROBOT_MODEL.getFrameId('r_sole'), pinocchio.SE3.Random())
    CONTACT = crocoddyl.ContactModel6D(ROBOT_STATE, Mref, gains)
    CONTACT_DER = Contact6DDerived(ROBOT_STATE, Mref, gains)
class Contact6DMultipleTest(ContactModelMultipleAbstractTestCase):
    """Check ContactModelMultiple aggregation of two 6D sole contacts on iCub."""
    ROBOT_MODEL = example_robot_data.loadICub().model
    ROBOT_STATE = crocoddyl.StateMultibody(ROBOT_MODEL)
    gains = pinocchio.utils.rand(2)  # random 2-vector of contact gains
    # Two 6D contacts at random reference placements, keyed (and
    # deterministically ordered) by sole-frame name.
    CONTACTS = collections.OrderedDict(
        sorted({
            'l_foot':
            crocoddyl.ContactModel6D(
                ROBOT_STATE, crocoddyl.FramePlacement(ROBOT_MODEL.getFrameId('l_sole'), pinocchio.SE3.Random()),
                gains),
            'r_foot':
            crocoddyl.ContactModel6D(
                ROBOT_STATE, crocoddyl.FramePlacement(ROBOT_MODEL.getFrameId('r_sole'), pinocchio.SE3.Random()), gains)
        }.items()))
if __name__ == '__main__':
    # Collect every test case into a single suite and exit with a
    # non-zero status if any test fails.
    test_cases = [Contact3DTest, Contact3DMultipleTest, Contact6DTest, Contact6DMultipleTest]
    loader = unittest.TestLoader()
    combined_suite = unittest.TestSuite(
        [loader.loadTestsFromTestCase(case) for case in test_cases])
    outcome = unittest.TextTestRunner().run(combined_suite)
    sys.exit(not outcome.wasSuccessful())
| [
"crocoddyl.StateMultibody",
"pinocchio.updateFramePlacements",
"pinocchio.SE3.Random",
"unittest.TextTestRunner",
"unittest.TestSuite",
"example_robot_data.loadICub",
"numpy.allclose",
"crocoddyl.utils.Contact6DDerived",
"crocoddyl.ContactModelMultiple",
"pinocchio.utils.zero",
"unittest.TestLoa... | [((191, 222), 'pinocchio.switchToNumpyMatrix', 'pinocchio.switchToNumpyMatrix', ([], {}), '()\n', (220, 222), False, 'import pinocchio\n'), ((4879, 4916), 'crocoddyl.StateMultibody', 'crocoddyl.StateMultibody', (['ROBOT_MODEL'], {}), '(ROBOT_MODEL)\n', (4903, 4916), False, 'import crocoddyl\n'), ((4930, 4953), 'pinocchio.utils.rand', 'pinocchio.utils.rand', (['(2)'], {}), '(2)\n', (4950, 4953), False, 'import pinocchio\n'), ((5077, 5127), 'crocoddyl.ContactModel3D', 'crocoddyl.ContactModel3D', (['ROBOT_STATE', 'xref', 'gains'], {}), '(ROBOT_STATE, xref, gains)\n', (5101, 5127), False, 'import crocoddyl\n'), ((5146, 5188), 'crocoddyl.utils.Contact3DDerived', 'Contact3DDerived', (['ROBOT_STATE', 'xref', 'gains'], {}), '(ROBOT_STATE, xref, gains)\n', (5162, 5188), False, 'from crocoddyl.utils import Contact3DDerived, Contact6DDerived\n'), ((5329, 5366), 'crocoddyl.StateMultibody', 'crocoddyl.StateMultibody', (['ROBOT_MODEL'], {}), '(ROBOT_MODEL)\n', (5353, 5366), False, 'import crocoddyl\n'), ((5380, 5403), 'pinocchio.utils.rand', 'pinocchio.utils.rand', (['(2)'], {}), '(2)\n', (5400, 5403), False, 'import pinocchio\n'), ((6117, 6154), 'crocoddyl.StateMultibody', 'crocoddyl.StateMultibody', (['ROBOT_MODEL'], {}), '(ROBOT_MODEL)\n', (6141, 6154), False, 'import crocoddyl\n'), ((6168, 6191), 'pinocchio.utils.rand', 'pinocchio.utils.rand', (['(2)'], {}), '(2)\n', (6188, 6191), False, 'import pinocchio\n'), ((6300, 6350), 'crocoddyl.ContactModel6D', 'crocoddyl.ContactModel6D', (['ROBOT_STATE', 'Mref', 'gains'], {}), '(ROBOT_STATE, Mref, gains)\n', (6324, 6350), False, 'import crocoddyl\n'), ((6369, 6411), 'crocoddyl.utils.Contact6DDerived', 'Contact6DDerived', (['ROBOT_STATE', 'Mref', 'gains'], {}), '(ROBOT_STATE, Mref, gains)\n', (6385, 6411), False, 'from crocoddyl.utils import Contact3DDerived, Contact6DDerived\n'), ((6553, 6590), 'crocoddyl.StateMultibody', 'crocoddyl.StateMultibody', (['ROBOT_MODEL'], {}), '(ROBOT_MODEL)\n', (6577, 6590), 
False, 'import crocoddyl\n'), ((6604, 6627), 'pinocchio.utils.rand', 'pinocchio.utils.rand', (['(2)'], {}), '(2)\n', (6624, 6627), False, 'import pinocchio\n'), ((7227, 7248), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (7246, 7248), False, 'import unittest\n'), ((7420, 7451), 'unittest.TestSuite', 'unittest.TestSuite', (['suites_list'], {}), '(suites_list)\n', (7438, 7451), False, 'import unittest\n'), ((7465, 7490), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (7488, 7490), False, 'import unittest\n'), ((843, 909), 'pinocchio.computeJointJacobians', 'pinocchio.computeJointJacobians', (['self.ROBOT_MODEL', 'self.robot_data'], {}), '(self.ROBOT_MODEL, self.robot_data)\n', (874, 909), False, 'import pinocchio\n'), ((918, 984), 'pinocchio.updateFramePlacements', 'pinocchio.updateFramePlacements', (['self.ROBOT_MODEL', 'self.robot_data'], {}), '(self.ROBOT_MODEL, self.robot_data)\n', (949, 984), False, 'import pinocchio\n'), ((2438, 2486), 'crocoddyl.ContactModelMultiple', 'crocoddyl.ContactModelMultiple', (['self.ROBOT_STATE'], {}), '(self.ROBOT_STATE)\n', (2468, 2486), False, 'import crocoddyl\n'), ((3067, 3133), 'pinocchio.computeJointJacobians', 'pinocchio.computeJointJacobians', (['self.ROBOT_MODEL', 'self.robot_data'], {}), '(self.ROBOT_MODEL, self.robot_data)\n', (3098, 3133), False, 'import pinocchio\n'), ((3142, 3208), 'pinocchio.updateFramePlacements', 'pinocchio.updateFramePlacements', (['self.ROBOT_MODEL', 'self.robot_data'], {}), '(self.ROBOT_MODEL, self.robot_data)\n', (3173, 3208), False, 'import pinocchio\n'), ((4826, 4854), 'example_robot_data.loadHyQ', 'example_robot_data.loadHyQ', ([], {}), '()\n', (4852, 4854), False, 'import example_robot_data\n'), ((5276, 5304), 'example_robot_data.loadHyQ', 'example_robot_data.loadHyQ', ([], {}), '()\n', (5302, 5304), False, 'import example_robot_data\n'), ((6063, 6092), 'example_robot_data.loadICub', 'example_robot_data.loadICub', ([], {}), '()\n', (6090, 6092), 
False, 'import example_robot_data\n'), ((6262, 6284), 'pinocchio.SE3.Random', 'pinocchio.SE3.Random', ([], {}), '()\n', (6282, 6284), False, 'import pinocchio\n'), ((6499, 6528), 'example_robot_data.loadICub', 'example_robot_data.loadICub', ([], {}), '()\n', (6526, 6528), False, 'import example_robot_data\n'), ((809, 833), 'pinocchio.utils.zero', 'pinocchio.utils.zero', (['nv'], {}), '(nv)\n', (829, 833), False, 'import pinocchio\n'), ((1154, 1178), 'pinocchio.utils.zero', 'pinocchio.utils.zero', (['nv'], {}), '(nv)\n', (1174, 1178), False, 'import pinocchio\n'), ((1531, 1586), 'numpy.allclose', 'np.allclose', (['self.data.Jc', 'self.data_der.Jc'], {'atol': '(1e-09)'}), '(self.data.Jc, self.data_der.Jc, atol=1e-09)\n', (1542, 1586), True, 'import numpy as np\n'), ((1643, 1698), 'numpy.allclose', 'np.allclose', (['self.data.a0', 'self.data_der.a0'], {'atol': '(1e-09)'}), '(self.data.a0, self.data_der.a0, atol=1e-09)\n', (1654, 1698), True, 'import numpy as np\n'), ((2006, 2069), 'numpy.allclose', 'np.allclose', (['self.data.da0_dx', 'self.data_der.da0_dx'], {'atol': '(1e-09)'}), '(self.data.da0_dx, self.data_der.da0_dx, atol=1e-09)\n', (2017, 2069), True, 'import numpy as np\n'), ((3033, 3057), 'pinocchio.utils.zero', 'pinocchio.utils.zero', (['nv'], {}), '(nv)\n', (3053, 3057), False, 'import pinocchio\n'), ((3378, 3402), 'pinocchio.utils.zero', 'pinocchio.utils.zero', (['nv'], {}), '(nv)\n', (3398, 3402), False, 'import pinocchio\n'), ((4013, 4057), 'numpy.allclose', 'np.allclose', (['self.dataSum.Jc', 'Jc'], {'atol': '(1e-09)'}), '(self.dataSum.Jc, Jc, atol=1e-09)\n', (4024, 4057), True, 'import numpy as np\n'), ((4114, 4158), 'numpy.allclose', 'np.allclose', (['self.dataSum.a0', 'a0'], {'atol': '(1e-09)'}), '(self.dataSum.a0, a0, atol=1e-09)\n', (4125, 4158), True, 'import numpy as np\n'), ((4611, 4663), 'numpy.allclose', 'np.allclose', (['self.dataSum.da0_dx', 'da0_dx'], {'atol': '(1e-09)'}), '(self.dataSum.da0_dx, da0_dx, atol=1e-09)\n', (4622, 4663), True, 
'import numpy as np\n'), ((5027, 5049), 'pinocchio.SE3.Random', 'pinocchio.SE3.Random', ([], {}), '()\n', (5047, 5049), False, 'import pinocchio\n'), ((6833, 6855), 'pinocchio.SE3.Random', 'pinocchio.SE3.Random', ([], {}), '()\n', (6853, 6855), False, 'import pinocchio\n'), ((7030, 7052), 'pinocchio.SE3.Random', 'pinocchio.SE3.Random', ([], {}), '()\n', (7050, 7052), False, 'import pinocchio\n'), ((5672, 5694), 'pinocchio.SE3.Random', 'pinocchio.SE3.Random', ([], {}), '()\n', (5692, 5694), False, 'import pinocchio\n'), ((5928, 5950), 'pinocchio.SE3.Random', 'pinocchio.SE3.Random', ([], {}), '()\n', (5948, 5950), False, 'import pinocchio\n')] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Result directories under "mia-result/", one per training configuration
# (set manually).
repo = ["ML_Affine_D40_E2", "ML_Affine_D40_E5", "ML_Affine_D40_E10", "ML_Affine_D40_E20"]
# x-axis values: the E parameter encoded in each repo name above.
x_ticks = [2, 5, 10, 20]
def main():
    """Plot per-label mean Dice and Hausdorff scores across the runs listed
    in ``repo`` and save the figure to "mia-result/plot.png".

    Reads each run's "results.csv" (semicolon-separated, with LABEL, DICE
    and HDRFDST columns) and draws one subplot column per label: Dice on
    the top row, Hausdorff distance on the bottom row.
    """
    labels = ['Amygdala', 'GreyMatter', 'Hippocampus', 'Thalamus', 'WhiteMatter']
    # metric[run][label] holds the mean value for that run/label pair.
    dice = [[0] * len(labels) for _ in range(len(repo))]
    hdrfdst = [[0] * len(labels) for _ in range(len(repo))]
    fig, axs = plt.subplots(2, 5, figsize=(16, 10))
    fig.suptitle('Machine Learning optimization Parameter Estimator\nwith constant Depth of 40', fontsize=20)
    axs[0, 0].set_ylabel('Dice', fontsize=20)
    axs[1, 0].set_ylabel('Hausdorff', fontsize=20)
    # Collect the per-label mean metrics from each run's results file.
    for run_idx, run in enumerate(repo):
        results = pd.read_csv("mia-result/" + run + "/results.csv", sep=';')
        for lbl_idx, label in enumerate(labels):
            label_rows = results.loc[results['LABEL'] == label]
            dice[run_idx][lbl_idx] = np.mean(label_rows['DICE'].values.tolist())
            hdrfdst[run_idx][lbl_idx] = np.mean(label_rows['HDRFDST'].values.tolist())
    # One subplot column per label, metrics plotted against x_ticks.
    for lbl_idx, label in enumerate(labels):
        axs[0, lbl_idx].plot(x_ticks, [d[lbl_idx] for d in dice], 'r-+')
        axs[0, lbl_idx].set_ylim(0, 1)
        axs[0, lbl_idx].set_title(label, fontsize=16)
        axs[0, lbl_idx].set_xticks(x_ticks)
        axs[1, lbl_idx].plot(x_ticks, [h[lbl_idx] for h in hdrfdst], 'r-+')
        axs[1, lbl_idx].set_ylim(0, np.max(hdrfdst))
        axs[1, lbl_idx].set_xticks(x_ticks)
    plt.savefig("mia-result/plot.png")
    plt.show()
plt.show()
# Script entry point.
if __name__ == '__main__':
    main()
| [
"matplotlib.pyplot.show",
"pandas.read_csv",
"numpy.max",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] | [((925, 961), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(5)'], {'figsize': '(16, 10)'}), '(2, 5, figsize=(16, 10))\n', (937, 961), True, 'import matplotlib.pyplot as plt\n'), ((1920, 1954), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""mia-result/plot.png"""'], {}), "('mia-result/plot.png')\n", (1931, 1954), True, 'import matplotlib.pyplot as plt\n'), ((1959, 1969), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1967, 1969), True, 'import matplotlib.pyplot as plt\n'), ((1275, 1301), 'pandas.read_csv', 'pd.read_csv', (['path'], {'sep': '""";"""'}), "(path, sep=';')\n", (1286, 1301), True, 'import pandas as pd\n'), ((1860, 1875), 'numpy.max', 'np.max', (['hdrfdst'], {}), '(hdrfdst)\n', (1866, 1875), True, 'import numpy as np\n')] |
# Import functions and libraries
import cv2
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.fft import dct, idct
# set image file to ../data/zelda.bmp
# you are free to point to any other image files
IMG_FILE = os.path.join("..", "data", "zelda.bmp")
# read image file, img is a gray scale image
img_gray = cv2.imread(IMG_FILE, cv2.IMREAD_GRAYSCALE)
if img_gray is None:
    # BUGFIX: cv2.imread returns None (instead of raising) when the file
    # is missing or unreadable; fail early with a clear error rather than
    # crashing on the .shape access below.
    raise FileNotFoundError(f"Could not read image file: {IMG_FILE}")
# NOTE(review): numpy's shape is (rows, cols) = (height, width) — the
# message label below says (width, height); confirm intended wording.
print(f"Reading {IMG_FILE}, img size(width,height): {img_gray.shape}")
def dct2(a):
    """Two-dimensional orthonormal DCT-II: spatial -> frequency domain."""
    # Separable transform: apply the 1-D DCT along one axis, then the other.
    transformed_cols = dct(a.T, norm='ortho').T
    return dct(transformed_cols, norm='ortho')
def idct2(a):
    """Two-dimensional orthonormal inverse DCT: frequency -> spatial domain."""
    # Separable inverse: apply the 1-D IDCT along one axis, then the other.
    restored_cols = idct(a.T, norm='ortho').T
    return idct(restored_cols, norm='ortho')
# create a variable to hold dct coefficients
img_size = img_gray.shape
# forward 2d DCT on 8x8 blocks
dct_8x8 = np.zeros(img_size)
for i in np.r_[:img_size[0]:8]:
    for j in np.r_[:img_size[1]:8]:
        # BUGFIX: use the 2-D transform dct2() defined above. The previous
        # call to the 1-D scipy dct() only transformed along the last axis
        # (and, with the default norm, left the idct round trip scaled).
        dct_8x8[i:(i+8), j:(j+8)] = dct2(img_gray[i:(i+8), j:(j+8)])
# now inverse 2d DCT on 8x8 block
dct_8x8_reconstructed = np.zeros(img_size)
for i in np.r_[:img_size[0]:8]:
    for j in np.r_[:img_size[1]:8]:
        # Apply the 2-D inverse DCT to each 8x8 block of coefficients.
        dct_8x8_reconstructed[i:(i+8), j:(j+8)] = idct2(dct_8x8[i:(i+8), j:(j+8)])
# Threshold (TRY YOUR THRESHOLD!!!!)
thresh = 0.01
# discard coefficients whose magnitude is below thresh * max coefficient
dct_thresh = dct_8x8 * (abs(dct_8x8) > (thresh*np.max(dct_8x8)))
percent_nonzeros = np.sum(dct_thresh != 0.0) / (img_size[0]*img_size[1]*1.0)
print(f"Keeping only {percent_nonzeros*100.0} of the DCT coefficients")
# now inverse 2d DCT on 8x8 block for the threshold-ed DCT coefficients
dct_8x8_reconstructed_th = np.zeros(img_size)
for i in np.r_[:img_size[0]:8]:
    for j in np.r_[:img_size[1]:8]:
        dct_8x8_reconstructed_th[i:(i+8), j:(j+8)] = idct2(dct_thresh[i:(i+8), j:(j+8)])
# print a sample 8x8 neighbourhood of the image and its coefficients
pos = 20
print(f"Image data at pos {pos, pos} to {pos+8, pos+8}")
print(img_gray[pos:pos+8, pos:pos+8])
print(f"DCT coefficients at pos {pos, pos} to {pos+8, pos+8}")
print(dct_8x8[pos:pos+8, pos:pos+8])
print(f"DCT coefficients with threshold at pos {pos, pos} to {pos+8, pos+8}")
print(dct_thresh[pos:pos+8, pos:pos+8])
print(dct_thresh[pos:pos+8, pos:pos+8])
# Display the original image and both reconstructions side by side,
# all rendered with the grayscale colormap.
plt.gray()
plt.subplot(131)
plt.imshow(img_gray)
plt.title('original image')
plt.subplot(132)
plt.imshow(dct_8x8_reconstructed)
plt.title('reconstructed image (DCT+IDCT) 8x8 block')
plt.subplot(133)
plt.imshow(dct_8x8_reconstructed_th)
plt.title('reconstructed image with threshold')
plt.show()
# Question 1:
# think about changing threshold of DCT?
# change thresh at Line 47 to see how this value affect final output
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.gray",
"scipy.fft.idct",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.imshow",
"numpy.zeros",
"cv2.imread",
"numpy.max",
"scipy.fft.dct",
"os.path.join"
] | [((242, 281), 'os.path.join', 'os.path.join', (['""".."""', '"""data"""', '"""zelda.bmp"""'], {}), "('..', 'data', 'zelda.bmp')\n", (254, 281), False, 'import os\n'), ((342, 384), 'cv2.imread', 'cv2.imread', (['IMG_FILE', 'cv2.IMREAD_GRAYSCALE'], {}), '(IMG_FILE, cv2.IMREAD_GRAYSCALE)\n', (352, 384), False, 'import cv2\n'), ((912, 930), 'numpy.zeros', 'np.zeros', (['img_size'], {}), '(img_size)\n', (920, 930), True, 'import numpy as np\n'), ((1189, 1207), 'numpy.zeros', 'np.zeros', (['img_size'], {}), '(img_size)\n', (1197, 1207), True, 'import numpy as np\n'), ((1887, 1905), 'numpy.zeros', 'np.zeros', (['img_size'], {}), '(img_size)\n', (1895, 1905), True, 'import numpy as np\n'), ((2527, 2537), 'matplotlib.pyplot.gray', 'plt.gray', ([], {}), '()\n', (2535, 2537), True, 'import matplotlib.pyplot as plt\n'), ((2539, 2555), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(131)'], {}), '(131)\n', (2550, 2555), True, 'import matplotlib.pyplot as plt\n'), ((2557, 2577), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_gray'], {}), '(img_gray)\n', (2567, 2577), True, 'import matplotlib.pyplot as plt\n'), ((2579, 2606), 'matplotlib.pyplot.title', 'plt.title', (['"""original image"""'], {}), "('original image')\n", (2588, 2606), True, 'import matplotlib.pyplot as plt\n'), ((2608, 2624), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(132)'], {}), '(132)\n', (2619, 2624), True, 'import matplotlib.pyplot as plt\n'), ((2626, 2659), 'matplotlib.pyplot.imshow', 'plt.imshow', (['dct_8x8_reconstructed'], {}), '(dct_8x8_reconstructed)\n', (2636, 2659), True, 'import matplotlib.pyplot as plt\n'), ((2661, 2714), 'matplotlib.pyplot.title', 'plt.title', (['"""reconstructed image (DCT+IDCT) 8x8 block"""'], {}), "('reconstructed image (DCT+IDCT) 8x8 block')\n", (2670, 2714), True, 'import matplotlib.pyplot as plt\n'), ((2716, 2732), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(133)'], {}), '(133)\n', (2727, 2732), True, 'import matplotlib.pyplot as plt\n'), ((2734, 2770), 
'matplotlib.pyplot.imshow', 'plt.imshow', (['dct_8x8_reconstructed_th'], {}), '(dct_8x8_reconstructed_th)\n', (2744, 2770), True, 'import matplotlib.pyplot as plt\n'), ((2772, 2819), 'matplotlib.pyplot.title', 'plt.title', (['"""reconstructed image with threshold"""'], {}), "('reconstructed image with threshold')\n", (2781, 2819), True, 'import matplotlib.pyplot as plt\n'), ((2821, 2831), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2829, 2831), True, 'import matplotlib.pyplot as plt\n'), ((1651, 1676), 'numpy.sum', 'np.sum', (['(dct_thresh != 0.0)'], {}), '(dct_thresh != 0.0)\n', (1657, 1676), True, 'import numpy as np\n'), ((1095, 1126), 'scipy.fft.dct', 'dct', (['img_gray[i:i + 8, j:j + 8]'], {}), '(img_gray[i:i + 8, j:j + 8])\n', (1098, 1126), False, 'from scipy.fft import dct, idct\n'), ((1432, 1463), 'scipy.fft.idct', 'idct', (['dct_8x8[i:i + 8, j:j + 8]'], {}), '(dct_8x8[i:i + 8, j:j + 8])\n', (1436, 1463), False, 'from scipy.fft import dct, idct\n'), ((2136, 2170), 'scipy.fft.idct', 'idct', (['dct_thresh[i:i + 8, j:j + 8]'], {}), '(dct_thresh[i:i + 8, j:j + 8])\n', (2140, 2170), False, 'from scipy.fft import dct, idct\n'), ((579, 601), 'scipy.fft.dct', 'dct', (['a.T'], {'norm': '"""ortho"""'}), "(a.T, norm='ortho')\n", (582, 601), False, 'from scipy.fft import dct, idct\n'), ((748, 771), 'scipy.fft.idct', 'idct', (['a.T'], {'norm': '"""ortho"""'}), "(a.T, norm='ortho')\n", (752, 771), False, 'from scipy.fft import dct, idct\n'), ((1613, 1628), 'numpy.max', 'np.max', (['dct_8x8'], {}), '(dct_8x8)\n', (1619, 1628), True, 'import numpy as np\n')] |
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import numpy as np
import os
from pyiron_atomistics.atomistics.structure.atoms import Atoms
from pyiron_base.generic.hdfio import FileHDFio
from pyiron_base._tests import ToyJob, TestWithProject
from pyiron_electrochemistry.atomistic.geometry.water import WaterGeometryCalculator, get_angle_traj_vectors
import unittest
class WaterToyJob(ToyJob):
    """Toy job that replays a stored TIP3P bulk-water trajectory from an HDF file."""

    def __init__(self, project, job_name):
        super().__init__(project, job_name)
        here = os.path.dirname(os.path.abspath(__file__))
        traj_path = os.path.join(here, "../../static/water_bulk_tip3p_traj")
        self._hdf_obj = FileHDFio(os.path.abspath(traj_path))
        self._structure = Atoms().from_hdf(self._hdf_obj["input"])

    @property
    def structure(self):
        """Structure read from the HDF file's ``input`` group."""
        return self._structure

    def run_static(self):
        # "Running" this toy job just copies the stored trajectory into the output.
        self.status.running = True
        self.output.unwrapped_positions = self._hdf_obj["output/generic/unwrapped_positions"]
        self.status.finished = True
        self.to_hdf()
class TestWaterGeometry(TestWithProject):
    """Checks WaterGeometryCalculator against reference O-H vectors and angles
    computed directly from the trajectory with structure methods."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        job = cls.project.create_job(job_type=WaterToyJob, job_name="water_bulk")
        job.run()
        cls.water_geo = WaterGeometryCalculator(job)
        struct = cls.water_geo.structure.copy()
        # Reference data computed frame-by-frame, molecule-by-molecule.
        cls.oh_vec_1 = list()
        cls.oh_vec_2 = list()
        cls.oh_angles = list()
        for pos in job.output.unwrapped_positions:
            oh_vec_1 = list()
            oh_vec_2 = list()
            oh_angles = list()
            struct.positions = pos
            for i, oxy_ind in enumerate(cls.water_geo.water_oxygen_indices):
                # O->H distance vectors for the two hydrogens of molecule i.
                vec_1 = struct.get_distance(oxy_ind, cls.water_geo.water_hydrogen_indices[i, 0], vector=True)
                vec_2 = struct.get_distance(oxy_ind, cls.water_geo.water_hydrogen_indices[i, 1], vector=True)
                oh_vec_1.append(vec_1)
                oh_vec_2.append(vec_2)
                oh_angles.append(struct.get_angle(cls.water_geo.water_hydrogen_indices[i, 0], oxy_ind,
                                                  cls.water_geo.water_hydrogen_indices[i, 1]))
            cls.oh_vec_1.append(oh_vec_1)
            cls.oh_vec_2.append(oh_vec_2)
            cls.oh_angles.append(oh_angles)

    def test_consistency(self):
        """Indexing sanity: 27 waters, disjoint hydrogen index columns."""
        self.assertEqual(self.water_geo.structure.get_chemical_formula(), 'H54O27')
        self.assertEqual(len(self.water_geo.water_oxygen_indices), 27)
        self.assertEqual(len(self.water_geo.water_hydrogen_indices[:, 0]), 27)
        self.assertEqual(len(self.water_geo.water_hydrogen_indices[:, 1]), 27)
        self.assertEqual(len(np.intersect1d(self.water_geo.structure.select_index("H"),
                                            self.water_geo.water_hydrogen_indices[:, 0])), 27)
        self.assertEqual(len(np.intersect1d(self.water_geo.structure.select_index("H"),
                                            self.water_geo.water_hydrogen_indices[:, 1])), 27)
        # The two hydrogen columns must not share any atom index.
        self.assertEqual(np.intersect1d(self.water_geo.water_hydrogen_indices[:, 1],
                                        self.water_geo.water_hydrogen_indices[:, 0]).tolist(), [])

    def test_get_intra_oh_vec(self):
        """Calculator's O-H vectors match the directly computed reference."""
        oh_vec_1, oh_vec_2 = self.water_geo._get_intra_oh_vec()
        self.assertTrue(np.allclose(oh_vec_1, np.array(self.oh_vec_1)))
        self.assertTrue(np.allclose(oh_vec_2, np.array(self.oh_vec_2)))

    def test_intra_oh_distances(self):
        # Regression values for this fixed trajectory
        # (2 hydrogens per oxygen, 11 frames, 27 molecules).
        self.assertEqual(self.water_geo.intra_oh_distances.shape, (2, 11, 27))
        self.assertEqual(self.water_geo.intra_oh_distances.max(), 1.0571081688094743)
        self.assertEqual(self.water_geo.intra_oh_distances.min(), 0.9323863104101667)

    def test_intra_oh_angles(self):
        self.assertEqual(self.water_geo.bond_angles.shape, (11, 27))
        # Reference angles are in degrees; the calculator reports radians.
        self.assertTrue(np.allclose(self.water_geo.bond_angles, np.array(self.oh_angles) * np.pi / 180))

    def test_get_angles_traj_vectors(self):
        self.assertTrue(np.allclose(np.array(self.oh_angles) * np.pi / 180,
                                    get_angle_traj_vectors(np.array(self.oh_vec_1), np.array(self.oh_vec_2))))
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    unittest.main()
| [
"unittest.main",
"os.path.abspath",
"pyiron_base.generic.hdfio.FileHDFio",
"numpy.array",
"pyiron_atomistics.atomistics.structure.atoms.Atoms",
"numpy.intersect1d",
"pyiron_electrochemistry.atomistic.geometry.water.WaterGeometryCalculator"
] | [((4480, 4495), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4493, 4495), False, 'import unittest\n'), ((825, 850), 'os.path.abspath', 'os.path.abspath', (['filename'], {}), '(filename)\n', (840, 850), False, 'import os\n'), ((875, 898), 'pyiron_base.generic.hdfio.FileHDFio', 'FileHDFio', (['abs_filename'], {}), '(abs_filename)\n', (884, 898), False, 'from pyiron_base.generic.hdfio import FileHDFio\n'), ((1523, 1551), 'pyiron_electrochemistry.atomistic.geometry.water.WaterGeometryCalculator', 'WaterGeometryCalculator', (['job'], {}), '(job)\n', (1546, 1551), False, 'from pyiron_electrochemistry.atomistic.geometry.water import WaterGeometryCalculator, get_angle_traj_vectors\n'), ((714, 739), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (729, 739), False, 'import os\n'), ((925, 932), 'pyiron_atomistics.atomistics.structure.atoms.Atoms', 'Atoms', ([], {}), '()\n', (930, 932), False, 'from pyiron_atomistics.atomistics.structure.atoms import Atoms\n'), ((3615, 3638), 'numpy.array', 'np.array', (['self.oh_vec_1'], {}), '(self.oh_vec_1)\n', (3623, 3638), True, 'import numpy as np\n'), ((3687, 3710), 'numpy.array', 'np.array', (['self.oh_vec_2'], {}), '(self.oh_vec_2)\n', (3695, 3710), True, 'import numpy as np\n'), ((3308, 3417), 'numpy.intersect1d', 'np.intersect1d', (['self.water_geo.water_hydrogen_indices[:, 1]', 'self.water_geo.water_hydrogen_indices[:, 0]'], {}), '(self.water_geo.water_hydrogen_indices[:, 1], self.water_geo.\n water_hydrogen_indices[:, 0])\n', (3322, 3417), True, 'import numpy as np\n'), ((4395, 4418), 'numpy.array', 'np.array', (['self.oh_vec_1'], {}), '(self.oh_vec_1)\n', (4403, 4418), True, 'import numpy as np\n'), ((4420, 4443), 'numpy.array', 'np.array', (['self.oh_vec_2'], {}), '(self.oh_vec_2)\n', (4428, 4443), True, 'import numpy as np\n'), ((4174, 4198), 'numpy.array', 'np.array', (['self.oh_angles'], {}), '(self.oh_angles)\n', (4182, 4198), True, 'import numpy as np\n'), ((4296, 4320), 'numpy.array', 
'np.array', (['self.oh_angles'], {}), '(self.oh_angles)\n', (4304, 4320), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import sys
import numpy as np
import telescope_1d
import os
# Generate simulated 1-D telescope observations over a grid of random seeds,
# sky-signal types and timing errors, writing each uv-plane triple to disk.
# Usage: script.py <ndishes> <npix> <redundant: 1|0>
ndishes = int(sys.argv[1])
npix = int(sys.argv[2])
redundant = sys.argv[3] == "1"
redstr = 'red' if redundant else 'nred'
t = None  # telescope is built lazily, only if at least one output is missing
for seed in range(30):
    for sigt in 'sig point gauss unif'.split():
        sig = None  # sky realisation is built lazily per (seed, sigt)
        for te in [0, 0.1, 1., 10., 100.]:
            outfname = f"out/{ndishes}_{npix}_{redstr}_{seed}_{sigt}_{te}.npy"
            if os.path.isfile(outfname):
                print(f"{outfname} exists.")
                continue
            if t is None:
                # Larger arrays (>= 32 dishes) get a finer frequency grid.
                # (The original < 16 and < 32 branches both selected 512.)
                Nfreq = 1024 if ndishes >= 32 else 512
                t = telescope_1d.Telescope1D(Ndishes=ndishes, Npix_fft=npix,
                                             redundant=redundant, Nfreq=Nfreq,
                                             seed=22)
            if sig is None:
                makers = {'sig': t.get_signal,
                          'point': t.get_point_source_sky,
                          'gauss': t.get_gaussian_sky,
                          'unif': t.get_uniform_sky}
                if sigt not in makers:
                    # Replaces the original unreachable `stop` NameError crash
                    # with an explicit error.
                    raise ValueError(f"Unknown signal type: {sigt!r}")
                sig = makers[sigt](seed=seed)
            uvsig = t.observe_image(sig)
            print(f"Working {outfname}", sig.sum())
            # te * 1e-12 — presumably picoseconds converted to seconds; confirm
            # against telescope_1d's time_error_sigma units.
            uvplane, uvplane_f, uvplane_1 = t.get_obs_uvplane(
                uvsig, time_error_sigma=te * 1e-12, filter_FG=True)
            np.save(outfname, (uvplane, uvplane_f, uvplane_1))
| [
"os.path.isfile",
"numpy.save",
"telescope_1d.Telescope1D"
] | [((445, 469), 'os.path.isfile', 'os.path.isfile', (['outfname'], {}), '(outfname)\n', (459, 469), False, 'import os\n'), ((1588, 1638), 'numpy.save', 'np.save', (['outfname', '(uvplane, uvplane_f, uvplane_1)'], {}), '(outfname, (uvplane, uvplane_f, uvplane_1))\n', (1595, 1638), True, 'import numpy as np\n'), ((786, 890), 'telescope_1d.Telescope1D', 'telescope_1d.Telescope1D', ([], {'Ndishes': 'ndishes', 'Npix_fft': 'npix', 'redundant': 'redundant', 'Nfreq': 'Nfreq', 'seed': '(22)'}), '(Ndishes=ndishes, Npix_fft=npix, redundant=\n redundant, Nfreq=Nfreq, seed=22)\n', (810, 890), False, 'import telescope_1d\n')] |
from core.utils import decode_cfg, load_weights
from core.image import draw_bboxes, preprocess_image, postprocess_image, read_image, read_video, Shader
import matplotlib.pyplot as plt
import time
import cv2
import numpy as np
import tensorflow as tf
import sys
import mediapipe as mp
from djitellopy import Tello
mp_face_detection = mp.solutions.face_detection
mp_drawing = mp.solutions.drawing_utils
# from headers import YoloV4Header as Header
# from core.model.one_stage.yolov4 import YOLOv4_Tiny as Model
# cfg = decode_cfg("cfgs/coco_yolov4_tiny.yaml")
# model,evalmodel = Model(cfg,416)
from headers import YoloV4Header as Header
from core.model.one_stage.yolov4 import YOLOv4 as Model
cfg = decode_cfg("cfgs/coco_yolov4.yaml")
model,evalmodel = Model(cfg,416)
model.summary()
init_weight_path = cfg['train']['init_weight_path']
if init_weight_path:
print('Load Weights File From:', init_weight_path)
load_weights(model, init_weight_path)
else:
raise SystemExit('init_weight_path is Empty !')
shader = Shader(cfg['yolo']['num_classes'])
names = cfg['yolo']['names']
# image_size = cfg['test']['image_size'][0]
image_size = 416
iou_threshold = cfg["yolo"]["iou_threshold"]
score_threshold = cfg["yolo"]["score_threshold"]
max_outputs = cfg["yolo"]["max_boxes"]
num_classes = cfg["yolo"]["num_classes"]
strides = cfg["yolo"]["strides"]
mask = cfg["yolo"]["mask"]
anchors = cfg["yolo"]["anchors"]
print(image_size)
def preprocess_image(image, size, bboxes=None):
    """Letterbox-resize an RGB uint8 image to ``size``, padding with gray (127).

    :param image: RGB, uint8
    :param size: (width, height) of the output canvas
    :param bboxes: optional [x1, y1, x2, y2] boxes to rescale onto the canvas
    :return: padded image (RGB, uint8), plus rescaled boxes if given
    """
    target_w, target_h = size
    src_h, src_w, _ = image.shape
    # Uniform scale preserving aspect ratio, then center on the canvas.
    scale = min(target_w / src_w, target_h / src_h)
    new_w, new_h = int(scale * src_w), int(scale * src_h)
    pad_x = (target_w - new_w) // 2
    pad_y = (target_h - new_h) // 2
    canvas = np.full(shape=[target_h, target_w, 3], dtype=np.uint8, fill_value=127)
    canvas[pad_y:new_h + pad_y, pad_x:new_w + pad_x, :] = cv2.resize(image, (new_w, new_h))
    if bboxes is None:
        return canvas
    boxes = np.asarray(bboxes).astype(np.float32)
    # Apply the same scale and offset to box corners.
    boxes[:, [0, 2]] = boxes[:, [0, 2]] * scale + pad_x
    boxes[:, [1, 3]] = boxes[:, [1, 3]] * scale + pad_y
    return canvas, boxes
def inference(image):
    """Run the YOLOv4 detector on one RGB image.

    Returns (elapsed_ms, bboxes, scores, classes); boxes are mapped back
    to the original image size.
    """
    h, w = image.shape[:2]
    image = preprocess_image(image, (image_size, image_size)).astype(np.float32)
    batch = np.expand_dims(image, axis=0) / 255.
    start = time.time()
    pred = model.predict(batch)
    bboxes, scores, classes, valid_detections = Header(
        80, anchors, mask, strides, 10, iou_threshold, score_threshold, inputs=pred)
    elapsed = time.time() - start
    # Keep only the valid detections of the single batch element.
    n_valid = valid_detections[0]
    bboxes = bboxes[0][:n_valid]
    scores = scores[0][:n_valid]
    classes = classes[0][:n_valid]
    _, bboxes = postprocess_image(image, (w, h), bboxes.numpy())
    return elapsed * 1000, bboxes, scores, classes
def intializeTello():
    """Connect to a Tello drone, zero its control channels, and restart video."""
    drone = Tello()
    drone.connect()
    # Zero every control channel so the drone starts stationary.
    drone.for_back_velocity = 0
    drone.left_right_velocity = 0
    drone.up_down_velocity = 0
    drone.yaw_velocity = 0
    drone.speed = 0
    print(drone.get_battery())
    # Restart the video stream from a clean state.
    drone.streamoff()
    drone.streamon()
    return drone
def telloGetFrame(myDrone, w, h):
    """Grab the drone's current video frame and resize it to (w, h)."""
    reader = myDrone.get_frame_read()
    return cv2.resize(reader.frame, (w, h))
# Main control loop: fly the Tello on a timed pattern while running the
# YOLOv4 detector on its video feed.  Press 'q' to land and quit.
myDrone = intializeTello()
w=640
h=480
myDrone.takeoff()
# cap = cv2.VideoCapture(0)
# tracker = CentroidTracker(max_lost=10, tracker_output_format='mot_challenge')
start = time.time()
while(True):
    # ret, frame = cap.read()
    frame = telloGetFrame(myDrone, w, h)
    # Swap B and R channels before detection.
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    ms, bboxes, scores, classes = inference(frame)
    image = draw_bboxes(frame, bboxes, scores, classes, names, shader)
    # Timed flight pattern; send_rc_control args are presumably
    # (left/right, forward/back, up/down, yaw) per djitellopy — TODO confirm.
    if time.time() - start > 50 :
        myDrone.send_rc_control(0, -5,
                                0, 10)
    elif time.time() - start > 30 :
        myDrone.send_rc_control(0, 5,
                                0, -15)
    elif time.time()-start > 0:
        myDrone.send_rc_control(0, 5,
                                0, 10)
    # tracks = tracker.update(bboxes, scores, classes)
    # updated_image = draw_tracks(image, tracks)
    cv2.imshow("image", image)
    print('Inference Time:', ms, 'ms')
    print('Fps:', 1000/ms)
    # Swap the channels back before displaying the annotated frame.
    # NOTE(review): both conversions use COLOR_BGR2RGB (a plain R/B swap);
    # verify the displayed colours are correct.
    frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    cv2.imshow('frame',frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        myDrone.land()
        break
# cap.release()
cv2.destroyAllWindows()
| [
"numpy.full",
"core.model.one_stage.yolov4.YOLOv4",
"cv2.cvtColor",
"cv2.waitKey",
"numpy.asarray",
"cv2.imshow",
"numpy.expand_dims",
"core.image.Shader",
"time.time",
"core.utils.decode_cfg",
"djitellopy.Tello",
"headers.YoloV4Header",
"core.image.preprocess_image",
"core.utils.load_weig... | [((704, 739), 'core.utils.decode_cfg', 'decode_cfg', (['"""cfgs/coco_yolov4.yaml"""'], {}), "('cfgs/coco_yolov4.yaml')\n", (714, 739), False, 'from core.utils import decode_cfg, load_weights\n'), ((758, 773), 'core.model.one_stage.yolov4.YOLOv4', 'Model', (['cfg', '(416)'], {}), '(cfg, 416)\n', (763, 773), True, 'from core.model.one_stage.yolov4 import YOLOv4 as Model\n'), ((1030, 1064), 'core.image.Shader', 'Shader', (["cfg['yolo']['num_classes']"], {}), "(cfg['yolo']['num_classes'])\n", (1036, 1064), False, 'from core.image import draw_bboxes, preprocess_image, postprocess_image, read_image, read_video, Shader\n'), ((3675, 3686), 'time.time', 'time.time', ([], {}), '()\n', (3684, 3686), False, 'import time\n'), ((4709, 4732), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4730, 4732), False, 'import cv2\n'), ((922, 959), 'core.utils.load_weights', 'load_weights', (['model', 'init_weight_path'], {}), '(model, init_weight_path)\n', (934, 959), False, 'from core.utils import decode_cfg, load_weights\n'), ((1740, 1767), 'cv2.resize', 'cv2.resize', (['image', '(nw, nh)'], {}), '(image, (nw, nh))\n', (1750, 1767), False, 'import cv2\n'), ((1787, 1845), 'numpy.full', 'np.full', ([], {'shape': '[ih, iw, 3]', 'dtype': 'np.uint8', 'fill_value': '(127)'}), '(shape=[ih, iw, 3], dtype=np.uint8, fill_value=127)\n', (1794, 1845), True, 'import numpy as np\n'), ((2367, 2396), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (2381, 2396), True, 'import numpy as np\n'), ((2434, 2445), 'time.time', 'time.time', ([], {}), '()\n', (2443, 2445), False, 'import time\n'), ((2527, 2614), 'headers.YoloV4Header', 'Header', (['(80)', 'anchors', 'mask', 'strides', '(10)', 'iou_threshold', 'score_threshold'], {'inputs': 'pred'}), '(80, anchors, mask, strides, 10, iou_threshold, score_threshold,\n inputs=pred)\n', (2533, 2614), True, 'from headers import YoloV4Header as Header\n'), ((2718, 2729), 
'time.time', 'time.time', ([], {}), '()\n', (2727, 2729), False, 'import time\n'), ((3054, 3061), 'djitellopy.Tello', 'Tello', ([], {}), '()\n', (3059, 3061), False, 'from djitellopy import Tello\n'), ((3456, 3483), 'cv2.resize', 'cv2.resize', (['myFrame', '(w, h)'], {}), '(myFrame, (w, h))\n', (3466, 3483), False, 'import cv2\n'), ((3783, 3821), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (3795, 3821), False, 'import cv2\n'), ((3885, 3943), 'core.image.draw_bboxes', 'draw_bboxes', (['frame', 'bboxes', 'scores', 'classes', 'names', 'shader'], {}), '(frame, bboxes, scores, classes, names, shader)\n', (3896, 3943), False, 'from core.image import draw_bboxes, preprocess_image, postprocess_image, read_image, read_video, Shader\n'), ((4438, 4464), 'cv2.imshow', 'cv2.imshow', (['"""image"""', 'image'], {}), "('image', image)\n", (4448, 4464), False, 'import cv2\n'), ((4543, 4581), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (4555, 4581), False, 'import cv2\n'), ((4586, 4612), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (4596, 4612), False, 'import cv2\n'), ((2285, 2334), 'core.image.preprocess_image', 'preprocess_image', (['image', '(image_size, image_size)'], {}), '(image, (image_size, image_size))\n', (2301, 2334), False, 'from core.image import draw_bboxes, preprocess_image, postprocess_image, read_image, read_video, Shader\n'), ((3951, 3962), 'time.time', 'time.time', ([], {}), '()\n', (3960, 3962), False, 'import time\n'), ((4619, 4633), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4630, 4633), False, 'import cv2\n'), ((2028, 2046), 'numpy.asarray', 'np.asarray', (['bboxes'], {}), '(bboxes)\n', (2038, 2046), True, 'import numpy as np\n'), ((4073, 4084), 'time.time', 'time.time', ([], {}), '()\n', (4082, 4084), False, 'import time\n'), ((4220, 4231), 'time.time', 'time.time', ([], {}), '()\n', (4229, 
4231), False, 'import time\n')] |
from .methodtools import cached_property, cached_method
import warnings
class MissingPackage(UserWarning):
    """Warning emitted when an optional third-party package is not installed."""
try:
import ffmpeg
except ImportError:
warnings.warn('pip3 install ffmpeg-python', MissingPackage)
try:
import numpy as np
except ImportError:
warnings.warn('pip3 install numpy', MissingPackage)
try:
from scipy.io import wavfile
except ImportError:
warnings.warn('pip3 install scipy', MissingPackage)
import os
import ffmpeg
import numpy as np
from scipy.io import wavfile
def ffmpeg_run(out):
    """Execute an ffmpeg output spec, echoing stderr on failure before re-raising."""
    try:
        ffmpeg.run(out,
                   capture_stdout=True,
                   capture_stderr=True,
                   overwrite_output=True)
    except ffmpeg.Error as err:
        # Surface ffmpeg's own diagnostics, then propagate the failure.
        print(err.stderr.decode())
        raise
class Sequence(np.ndarray):
    """1-D audio signal stored as an ndarray subclass with a sample ``rate`` (Hz).

    Helpers return new ``Sequence`` objects carrying the appropriate rate.
    """
    # Sample rate in Hz, attached to every instance by __new__.
    rate: float
    def __new__(cls, arr, rate: float):
        # ndarray subclasses are created via view() in __new__, not __init__.
        obj = np.asarray(arr).view(cls)
        obj.rate = rate  # (sample rate Hz)
        return obj
    @classmethod
    def from_wav(cls, file):
        """Load a WAV file into a Sequence (rate taken from the file header)."""
        rate, data = wavfile.read(file)
        return cls(data, rate=rate)
    def to_wav(self, file):
        """Write this sequence to a WAV file at its own sample rate."""
        return wavfile.write(file, rate=self.rate, data=self)
    @cached_property
    def duration(self):
        """Length of the signal in seconds."""
        return len(self) / self.rate
    def _float_index(self, idx_float):
        """Sample the signal at fractional indices via linear interpolation."""
        idx_floor = np.floor(idx_float).astype(int)
        p_next = idx_float - idx_floor
        # Clamp the upper neighbour so the last sample does not run off the end.
        idx_next = np.minimum(idx_floor + 1, len(self) - 1)
        arr = self[idx_floor] * (1 - p_next) + self[idx_next] * p_next
        return self.rated(arr.astype(self.dtype))
    def rated(self, arr):
        """Wrap ``arr`` as a Sequence with this sequence's rate."""
        return self.__class__(arr, self.rate)
    def clip_cast(self, arr):
        """Cast ``arr`` to this dtype, clipping to the dtype's value range first."""
        if arr.dtype != self.dtype:
            # np.ma.maximum_fill_value yields the dtype's minimum and vice versa.
            lo = np.ma.maximum_fill_value(self.dtype)
            hi = np.ma.minimum_fill_value(self.dtype)
            arr = np.clip(arr, lo, hi).astype(self.dtype)
        return self.__class__(arr, self.rate)
    def total_rms(self):
        """Root-mean-square amplitude of the whole signal, as a float."""
        return float(np.sqrt(np.mean(np.square(self, dtype=np.float32))))
    def moving_average(self, width: float):
        """Moving average over a window of roughly ``width`` seconds.

        Uses a cumulative-sum trick; windows shrink near the edges.
        """
        n = len(self)
        r = max(1, int(0.5 + width * self.rate / 2))  # half-width in samples
        assert r > 0 and n > 1
        idxlo = np.maximum(np.arange(n) - r, 0)
        idxhi = np.minimum(np.arange(n) + r, n - 1)
        cum = np.cumsum(self)
        mav = (cum[idxhi] - cum[idxlo]) / (idxhi - idxlo)
        return self.rated(mav)
    def envelope(self, width: float):
        """RMS envelope: sqrt of the moving average of the squared signal."""
        rms = self.rated(np.square(self.astype(np.float32)))
        mav = np.sqrt(rms.moving_average(width))
        return self.rated(mav)
    @cached_property
    def _cached_std_envelope(self):
        # Envelope over a 0.5 s window, normalised to unit standard deviation.
        env = self.envelope(0.5)
        #env = env.moving_average(3)
        return self.rated(env / env.std())
    @cached_method(maxsize=5)
    def cached_resample(self, rate: float):
        """Memoised resample (keeps the most recent target rates)."""
        return self.resample(rate)
    @cached_property
    def time(self):
        """Time axis in seconds, one entry per sample."""
        return np.arange(len(self)) / self.rate
    def plot(self, ax=None):
        """Plot amplitude vs. time on ``ax`` (current axes by default)."""
        import matplotlib.pyplot as plt
        if ax == None:
            ax = plt.gca()
        return ax.plot(self.time, self)
    @classmethod
    def from_video(cls, file, rate, mono=True):
        """Extract the first audio stream of a video file via ffmpeg.

        A temporary WAV file is written next to the source and removed again.
        """
        tmp = f'{file}.tmp.wav'
        try:
            out = ffmpeg.input(file)['a:0']
            out = ffmpeg.output(out, tmp, ar=rate, ac=int(mono))
            ffmpeg_run(out)
            seq = cls.from_wav(tmp)
        finally:
            if os.path.exists(tmp):
                os.remove(tmp)
        return seq
    def normalized_to(self, ref, width: float):
        """Rescale so this signal's local envelope matches that of ``ref``."""
        # The 1e-3 floor avoids division blow-ups in (near-)silent stretches.
        that = np.maximum(ref.envelope(width), 1e-3)
        this = np.maximum(self.envelope(width), 1e-3)
        return self.clip_cast(self * (that / this))
    def to_ogg(self, file):
        """Encode this sequence to an OGG file (via a temporary WAV)."""
        assert file.endswith('.ogg'), f'File {file} must end in .ogg'
        wav = f'{file}.wav'
        self.to_wav(wav)
        ffmpeg_run(ffmpeg.output(ffmpeg.input(wav), file))
        os.remove(wav)
        return
    def resample(self, out_rate: float):
        """Return this signal resampled to ``out_rate``."""
        in_duration = self.duration
        out_duration = self.duration * out_rate / self.rate
        out = self.respeed(in_duration, out_duration)
        out.rate = out_rate
        return out
    def respeed(self, in_duration: float, out_duration: float):
        '''
        output signal has out_duration and same rate as self, but
        the content resembles self from 0 to in_duration (at different playback speed)
        '''
        assert 0 < min(
            in_duration,
            out_duration), f'Non sense: {in_duration}s {out_duration}s'
        in_samples = int(0.5 + in_duration * self.rate)
        out_samples = int(0.5 + out_duration * self.rate)
        if out_duration == in_duration:
            seq = self.rated(self[:in_samples])
        else:
            # Guard against absurdly large output buffers.
            assert 0 < out_samples < 3e8, (out_samples, in_samples, in_duration,
                                           out_duration)
            idx = np.linspace(0, in_samples, out_samples, endpoint=False)
            seq = self._float_index(idx)
        return seq
    def _synced_to_plots(self, ref, start, end):
        """Debug plot of the aligned overlap between self and ``ref``."""
        this, that = self._synced_to_valid(ref, start, end)
        import matplotlib.pyplot as plt
        plt.plot(this.time, this)
        plt.plot(that.time, that, alpha=0.5)
        plt.show()
        #plt.scatter(this, that, alpha=0.1, marker='.')
        #plt.show()
        return
    def _synced_to_corr(self, ref, start, end):
        """Correlation of the aligned overlap, weighted by the overlap fraction."""
        x, y = self._synced_to_valid(ref, start, end)
        C = np.corrcoef(x, y)
        total_duration = (self.duration + ref.duration)
        valid_duration = (x.duration + y.duration)
        # Penalise alignments that only overlap on a short stretch.
        return C[0, 1] * valid_duration / total_duration
    def _synced_to_valid(self, ref, start, end):
        'trims self and ref to the valid interval (no padding)'
        this, lo, hi = self._synced_to(ref, start, end)
        this = self.rated(this[:hi - lo])
        that = self.rated(ref[lo:hi])
        return this, that
    def _synced_to(self, ref, start, end):
        """Stretch/resample self to cover [start, end) of ``ref``'s timeline.

        Returns (sequence, lo, hi): sample bounds of the overlap within ref.
        """
        seq = self
        if seq.rate != ref.rate:
            seq = seq.cached_resample(ref.rate)
        in_duration = seq.duration
        out_duration = end - start
        seq = seq.respeed(in_duration, out_duration)
        lo = int(start * ref.rate)
        if lo < 0:
            # start lies before ref's origin: drop the part that falls outside.
            seq = self.rated(seq[-lo:])
            lo = 0
        hi = min(int(end * ref.rate), lo + len(seq), len(ref))
        return seq, lo, hi
    def synced_to(self, ref, start, end, padcopy=True, width_normalize=10,
                  width_softpad=2, width_softstart=0.2):
        '''
        sync self to ref.
        out starts at start and ends at end (seconds).
        out is stretched if neccesary.
        out is trimmed and padded to fill (0, self.duration)
        out is padded with a copy of ref if padcopy==True
        out padding is soften around border
        '''
        seq, lo, hi = self._synced_to(ref, start, end)
        out = self.rated(np.zeros_like(ref))
        out[lo:hi] = seq[:hi - lo]
        if width_normalize > 0:
            out = out.normalized_to(ref, width_normalize)
        if padcopy:
            out[:lo] = ref[:lo]
            out[hi:] = ref[hi:]
            if width_softpad > 0:
                # Cross-fade between the ref padding and the synced audio.
                k = min(int(ref.rate * width_softpad), hi - lo)
                p = np.linspace(0, 1, k)
                out[lo:lo + k] = p * out[lo:lo + k] + (1 - p) * ref[lo:lo + k]
                out[hi - k:hi] = (1 - p) * out[hi - k:hi] + p * ref[hi - k:hi]
        if width_softstart > 0:
            # Fade in at the very beginning and fade out at the very end.
            k = min(int(ref.rate * width_softstart), len(out) // 2)
            p = np.linspace(0, 1, k)
            out[:k] = p * out[:k]
            out[-k:] = (1 - p) * out[-k:]
        return out
    @cached_property
    def _std_envelope(self):
        # Slow envelope normalised by an even slower one, then standardised;
        # used as the feature signal for the alignment search below.
        env = self.envelope(5)
        env /= self.envelope(60)
        env = (env / env.mean() - 1) / env.std()
        return self.rated(env)
    def _sync_to_delays(self, ref):
        """Coarse-to-fine grid search for the (start, end) alignment of self in ref.

        Returns (start, end, corr): interval in seconds plus the final
        weighted correlation score.
        """
        start = (ref.duration - self.duration) / 2
        rend = -start
        d_ref = ref.duration
        d_self = self.duration
        this = _this = self._std_envelope.resample(200)
        that = _that = ref._std_envelope.resample(200)
        radius = max(3, 1.3 * abs(start))
        nparts = 31
        corr = 0
        a = start
        b = rend
        while radius > 0.001:
            # Work at a resolution proportional to the current search radius.
            rate = min(nparts / radius, 200)
            this = _this.resample(rate)
            that = _that.resample(rate)
            points = []
            search = radius * np.linspace(-1, 1, nparts)
            for a in search + start:
                for b in search + rend:
                    d = d_ref + b - a
                    # Skip candidates that imply more than a 20% speed change.
                    drastic = d <= 0 or max(d / d_self, d_self / d) >= 1.2
                    if not drastic:
                        c = this._synced_to_corr(that, a, d_ref + b)
                        points.append((c, a, b))
            corr, start, rend = max(*points)
            # Shrink the search window and coarsen the grid for the next pass.
            radius *= min(max(2.5 / nparts, 0.25), 0.75)
            nparts = max(5, 1 + nparts // 2)
            if corr < 0.3:
                print(
                    f'  r={radius:.3f} [{start:.3f}s, {rend:.3f}s] corr={corr:.4f}')
                this._synced_to_plots(that, a, d_ref + b)
        #assert corr >= 0.3, (f'Insufficient correlation', corr, start, rend)
        return start, d_ref + rend, corr
| [
"os.remove",
"numpy.floor",
"numpy.clip",
"scipy.io.wavfile.read",
"ffmpeg.run",
"numpy.arange",
"matplotlib.pyplot.gca",
"numpy.zeros_like",
"os.path.exists",
"scipy.io.wavfile.write",
"numpy.cumsum",
"numpy.linspace",
"matplotlib.pyplot.show",
"numpy.ma.minimum_fill_value",
"numpy.corr... | [((167, 226), 'warnings.warn', 'warnings.warn', (['"""pip3 install ffmpeg-python"""', 'MissingPackage'], {}), "('pip3 install ffmpeg-python', MissingPackage)\n", (180, 226), False, 'import warnings\n'), ((279, 330), 'warnings.warn', 'warnings.warn', (['"""pip3 install numpy"""', 'MissingPackage'], {}), "('pip3 install numpy', MissingPackage)\n", (292, 330), False, 'import warnings\n'), ((393, 444), 'warnings.warn', 'warnings.warn', (['"""pip3 install scipy"""', 'MissingPackage'], {}), "('pip3 install scipy', MissingPackage)\n", (406, 444), False, 'import warnings\n'), ((558, 643), 'ffmpeg.run', 'ffmpeg.run', (['out'], {'capture_stdout': '(True)', 'capture_stderr': '(True)', 'overwrite_output': '(True)'}), '(out, capture_stdout=True, capture_stderr=True, overwrite_output=True\n )\n', (568, 643), False, 'import ffmpeg\n'), ((1005, 1023), 'scipy.io.wavfile.read', 'wavfile.read', (['file'], {}), '(file)\n', (1017, 1023), False, 'from scipy.io import wavfile\n'), ((1104, 1150), 'scipy.io.wavfile.write', 'wavfile.write', (['file'], {'rate': 'self.rate', 'data': 'self'}), '(file, rate=self.rate, data=self)\n', (1117, 1150), False, 'from scipy.io import wavfile\n'), ((2263, 2278), 'numpy.cumsum', 'np.cumsum', (['self'], {}), '(self)\n', (2272, 2278), True, 'import numpy as np\n'), ((3893, 3907), 'os.remove', 'os.remove', (['wav'], {}), '(wav)\n', (3902, 3907), False, 'import os\n'), ((5171, 5196), 'matplotlib.pyplot.plot', 'plt.plot', (['this.time', 'this'], {}), '(this.time, this)\n', (5179, 5196), True, 'import matplotlib.pyplot as plt\n'), ((5205, 5241), 'matplotlib.pyplot.plot', 'plt.plot', (['that.time', 'that'], {'alpha': '(0.5)'}), '(that.time, that, alpha=0.5)\n', (5213, 5241), True, 'import matplotlib.pyplot as plt\n'), ((5250, 5260), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5258, 5260), True, 'import matplotlib.pyplot as plt\n'), ((5467, 5484), 'numpy.corrcoef', 'np.corrcoef', (['x', 'y'], {}), '(x, y)\n', (5478, 5484), True, 
'import numpy as np\n'), ((1703, 1739), 'numpy.ma.maximum_fill_value', 'np.ma.maximum_fill_value', (['self.dtype'], {}), '(self.dtype)\n', (1727, 1739), True, 'import numpy as np\n'), ((1757, 1793), 'numpy.ma.minimum_fill_value', 'np.ma.minimum_fill_value', (['self.dtype'], {}), '(self.dtype)\n', (1781, 1793), True, 'import numpy as np\n'), ((3029, 3038), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3036, 3038), True, 'import matplotlib.pyplot as plt\n'), ((3395, 3414), 'os.path.exists', 'os.path.exists', (['tmp'], {}), '(tmp)\n', (3409, 3414), False, 'import os\n'), ((4897, 4952), 'numpy.linspace', 'np.linspace', (['(0)', 'in_samples', 'out_samples'], {'endpoint': '(False)'}), '(0, in_samples, out_samples, endpoint=False)\n', (4908, 4952), True, 'import numpy as np\n'), ((6916, 6934), 'numpy.zeros_like', 'np.zeros_like', (['ref'], {}), '(ref)\n', (6929, 6934), True, 'import numpy as np\n'), ((7251, 7271), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'k'], {}), '(0, 1, k)\n', (7262, 7271), True, 'import numpy as np\n'), ((7538, 7558), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'k'], {}), '(0, 1, k)\n', (7549, 7558), True, 'import numpy as np\n'), ((848, 863), 'numpy.asarray', 'np.asarray', (['arr'], {}), '(arr)\n', (858, 863), True, 'import numpy as np\n'), ((1294, 1313), 'numpy.floor', 'np.floor', (['idx_float'], {}), '(idx_float)\n', (1302, 1313), True, 'import numpy as np\n'), ((2176, 2188), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (2185, 2188), True, 'import numpy as np\n'), ((2224, 2236), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (2233, 2236), True, 'import numpy as np\n'), ((3208, 3226), 'ffmpeg.input', 'ffmpeg.input', (['file'], {}), '(file)\n', (3220, 3226), False, 'import ffmpeg\n'), ((3432, 3446), 'os.remove', 'os.remove', (['tmp'], {}), '(tmp)\n', (3441, 3446), False, 'import os\n'), ((3859, 3876), 'ffmpeg.input', 'ffmpeg.input', (['wav'], {}), '(wav)\n', (3871, 3876), False, 'import ffmpeg\n'), ((8453, 8479), 
'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'nparts'], {}), '(-1, 1, nparts)\n', (8464, 8479), True, 'import numpy as np\n'), ((1812, 1832), 'numpy.clip', 'np.clip', (['arr', 'lo', 'hi'], {}), '(arr, lo, hi)\n', (1819, 1832), True, 'import numpy as np\n'), ((1961, 1994), 'numpy.square', 'np.square', (['self'], {'dtype': 'np.float32'}), '(self, dtype=np.float32)\n', (1970, 1994), True, 'import numpy as np\n')] |
######################################################################################
#
# Authors : <NAME>, <NAME>
# KTH
# Email : {ni<EMAIL>ika, <EMAIL>
#
# mst_utils.py: Implements MST utility functions.
#####################################################################################
import numpy as np
import networkx as nx
from tree_utils import update_topology
def get_mst(W, t_opts):
    """Build a graph from the MI matrix ``W`` and return its maximum spanning tree.

    W is a symmetric (2N - 1) x (2N - 1) matrix of mutual-information entries;
    the last row/column (the link to the root) is excluded from the graph.
    Entries equal to -inf are treated as absent edges.  Each edge also carries
    the corresponding branch length from ``t_opts``.
    """
    n_nodes = W.shape[0] - 1  # drop the root link row/column
    graph = nx.Graph()
    for i in range(n_nodes):
        for j in range(n_nodes):
            mi = W[i, j]
            if mi == -np.infty:
                continue
            graph.add_edge(i, j, weight=mi, t=t_opts[i, j])
    return nx.maximum_spanning_tree(graph)
def add_midpoint_root(mst, root, n_nodes):
    """Insert ``root`` at the midpoint of the longest leaf-to-leaf path of ``mst``.

    Finds the farthest pair of leaves, walks along the path between them until
    passing the half-way point, and splits that edge with the root node so the
    root sits exactly at the path's midpoint.  Returns the modified tree.
    """
    n_leaves = (n_nodes + 1) // 2
    farthest_path_len = 0.
    farhest_pair = None
    for leaf_1 in range(n_leaves):
        for leaf_2 in range(leaf_1 + 1, n_leaves):
            len_ = nx.shortest_path_length(mst, leaf_1, leaf_2, weight='t')
            if len_ > farthest_path_len:
                farhest_pair = (leaf_1, leaf_2)
                farthest_path_len = len_
    path = nx.shortest_path(mst, farhest_pair[0], farhest_pair[1], weight='t')
    for i, node_i in enumerate(path):
        t_leaf2i = nx.shortest_path_length(mst, farhest_pair[0], node_i, weight='t')
        if t_leaf2i > farthest_path_len / 2:
            node_j = path[i - 1]
            # Split the midpoint edge proportionally so the root lies exactly
            # half-way along the longest path.  (The original code assigned the
            # full edge length to both halves — cf. its commented-out /2 — and
            # kept iterating over the mutated tree, which could re-trigger the
            # branch on a removed edge.)
            t_edge = mst[node_i][node_j]['t']
            t_root2i = t_leaf2i - farthest_path_len / 2
            mst.add_edge(node_i, root, t=t_root2i)
            mst.add_edge(node_j, root, t=t_edge - t_root2i)
            mst.remove_edge(node_i, node_j)
            break  # the root is inserted exactly once
    return mst
def bifurcate_mst(mst, leaves, root=0):
    """Turn an MST into a bifurcating (binary) tree rooted at ``root``.

    Alternates deletion and insertion steps until every node reaches its
    target degree (leaves 1, internal nodes 3, root 2), then attaches the
    root at the midpoint of the longest leaf-to-leaf path.
    """
    neighbors = mst.adj  # dict of neighbors and connecting weights
    n_nodes = len(neighbors) + 1  # +1 for root
    # Target degree per node index.
    D = [1 if n in leaves else 3 for n in range(n_nodes)]
    D[root] = 2
    deleted = []
    not_bifurcated = True
    while not_bifurcated:
        deleted, mst = deletion_step(mst, deleted, n_nodes, root)
        # Done once nothing awaits re-insertion and all degrees match D.
        if len(deleted) == 0 and np.all([mst.degree(n) == D[n] for n in mst]):
            break
        deleted, mst = insertion_step(mst, deleted, n_nodes, root, D)
    mst = add_midpoint_root(mst, root, n_nodes)
    update_topology(mst, root)
    return mst
def deletion_step(mst, deleted, n_nodes, root):
    """Deletion step (proposition 5.3 in the SEM paper).

    Removes internal nodes that became leaves (degree 1) and splices out
    degree-2 internal nodes by joining their two neighbours with an edge
    whose length is the sum of the removed edges.  Removed node indices are
    appended to ``deleted`` for later re-insertion.
    """
    n_leaves = (n_nodes + 1) // 2
    for node in range(n_leaves, n_nodes):
        if node == root or node in deleted:
            continue
        degree = mst.degree(node)
        if degree == 1:
            # An internal node that became a leaf carries no information.
            mst.remove_node(node)
            deleted.append(node)
        elif degree == 2:
            # Splice the node out, preserving the total branch length.
            (u, t_u), (v, t_v) = [(nb, mst.adj[node][nb]["t"]) for nb in mst.adj[node]]
            mst.add_edge(u, v, t=t_u + t_v)
            mst.remove_node(node)
            deleted.append(node)
    return deleted, mst
def insertion_step(mst, deleted, n_nodes, root, D):
    """Insertion step (proposition 5.4): re-insert previously deleted nodes
    next to nodes whose degree exceeds their target degree ``D[i]``."""
    eps = 1e-10  # small positive duration used in insertion step
    for i in range(n_nodes):
        if i in deleted or i == root:
            continue
        d = mst.degree(i)
        if d > D[i]:
            try:
                j = deleted.pop()
            except IndexError:
                # No spare nodes left to insert.
                break
            nbors_i = [(node, mst.adj[i][node]["t"]) for node in mst.adj[i]]
            if D[i] == 3:
                # Move the two closest neighbours of i onto the new node j,
                # attached to i via a near-zero-length edge.
                idx = np.argsort([nbor[1] for nbor in nbors_i])
                mst.add_edge(i, j, t=eps)
                for id in idx[:2]:
                    mst.add_edge(nbors_i[id][0], j, t=nbors_i[id][1])
                    mst.remove_edge(nbors_i[id][0], i)
            else:
                # D[i] = 1
                # i should be a leaf: move all of its neighbours onto j.
                mst.add_edge(i, j, t=eps)
                for nbor in nbors_i:
                    mst.add_edge(nbor[0], j, t=nbor[1])
                    mst.remove_edge(nbor[0], i)
    return deleted, mst
| [
"networkx.shortest_path_length",
"networkx.maximum_spanning_tree",
"networkx.shortest_path",
"numpy.argsort",
"networkx.Graph",
"tree_utils.update_topology"
] | [((531, 541), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (539, 541), True, 'import networkx as nx\n'), ((851, 878), 'networkx.maximum_spanning_tree', 'nx.maximum_spanning_tree', (['G'], {}), '(G)\n', (875, 878), True, 'import networkx as nx\n'), ((1399, 1466), 'networkx.shortest_path', 'nx.shortest_path', (['mst', 'farhest_pair[0]', 'farhest_pair[1]'], {'weight': '"""t"""'}), "(mst, farhest_pair[0], farhest_pair[1], weight='t')\n", (1415, 1466), True, 'import networkx as nx\n'), ((2508, 2534), 'tree_utils.update_topology', 'update_topology', (['mst', 'root'], {}), '(mst, root)\n', (2523, 2534), False, 'from tree_utils import update_topology\n'), ((1524, 1589), 'networkx.shortest_path_length', 'nx.shortest_path_length', (['mst', 'farhest_pair[0]', 'node_i'], {'weight': '"""t"""'}), "(mst, farhest_pair[0], node_i, weight='t')\n", (1547, 1589), True, 'import networkx as nx\n'), ((1201, 1257), 'networkx.shortest_path_length', 'nx.shortest_path_length', (['mst', 'leaf_1', 'leaf_2'], {'weight': '"""t"""'}), "(mst, leaf_1, leaf_2, weight='t')\n", (1224, 1257), True, 'import networkx as nx\n'), ((3736, 3777), 'numpy.argsort', 'np.argsort', (['[nbor[1] for nbor in nbors_i]'], {}), '([nbor[1] for nbor in nbors_i])\n', (3746, 3777), True, 'import numpy as np\n')] |
# Copyright (c) [2012]-[2021] Shanghai Yitu Technology Co., Ltd.
#
# This source code is licensed under the Clear BSD License
# LICENSE file in the root directory of this file
# All rights reserved.
"""
Borrow from timm(https://github.com/rwightman/pytorch-image-models)
"""
import torch
import torch.nn as nn
import numpy as np
from timm.models.layers import DropPath
class Mlp(nn.Module):
    """Two-layer feed-forward block: Linear -> activation -> dropout -> Linear -> dropout."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # Fall back to the input width when hidden/output widths are not given.
        hidden = hidden_features if hidden_features else in_features
        out = out_features if out_features else in_features
        self.fc1 = nn.Linear(in_features, hidden)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden, out)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        # fc1 -> act -> drop -> fc2 -> drop, as one chained expression.
        return self.drop(self.fc2(self.drop(self.act(self.fc1(x)))))
class Attention(nn.Module):
    """Multi-head self-attention over a (B, N, C) token sequence."""

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # Default scaling is 1/sqrt(head_dim) when qk_scale is falsy.
        self.scale = qk_scale if qk_scale else head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        batch, tokens, dim = x.shape
        # Project to q/k/v in one pass, then split heads:
        # (B, N, 3C) -> (3, B, heads, N, head_dim)
        qkv = self.qkv(x).reshape(batch, tokens, 3, self.num_heads, dim // self.num_heads)
        qkv = qkv.permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)
        scores = (q @ k.transpose(-2, -1)) * self.scale
        weights = self.attn_drop(scores.softmax(dim=-1))
        out = (weights @ v).transpose(1, 2).reshape(batch, tokens, dim)
        return self.proj_drop(self.proj(out))
class Block(nn.Module):
    """Pre-norm transformer encoder block: attention and MLP, each on a residual branch."""

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        # Stochastic depth on both residual branches; identity when the rate is zero.
        if drop_path > 0.:
            self.drop_path = DropPath(drop_path)
        else:
            self.drop_path = nn.Identity()
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio),
                       act_layer=act_layer, drop=drop)

    def forward(self, x):
        x = x + self.drop_path(self.attn(self.norm1(x)))
        return x + self.drop_path(self.mlp(self.norm2(x)))
def get_sinusoid_encoding(n_position, d_hid):
    """Build the fixed sinusoidal position-encoding table, shape (1, n_position, d_hid)."""
    # angle[pos, j] = pos / 10000^(2*(j//2)/d_hid); even j -> sin, odd j -> cos
    table = np.array([[pos / np.power(10000, 2 * (j // 2) / d_hid) for j in range(d_hid)]
                      for pos in range(n_position)])
    table[:, 0::2] = np.sin(table[:, 0::2])
    table[:, 1::2] = np.cos(table[:, 1::2])
    return torch.FloatTensor(table).unsqueeze(0)
| [
"torch.nn.Dropout",
"timm.models.layers.DropPath",
"numpy.power",
"torch.FloatTensor",
"numpy.sin",
"numpy.cos",
"torch.nn.Linear",
"torch.nn.Identity"
] | [((3111, 3142), 'numpy.sin', 'np.sin', (['sinusoid_table[:, 0::2]'], {}), '(sinusoid_table[:, 0::2])\n', (3117, 3142), True, 'import numpy as np\n'), ((3183, 3214), 'numpy.cos', 'np.cos', (['sinusoid_table[:, 1::2]'], {}), '(sinusoid_table[:, 1::2])\n', (3189, 3214), True, 'import numpy as np\n'), ((652, 691), 'torch.nn.Linear', 'nn.Linear', (['in_features', 'hidden_features'], {}), '(in_features, hidden_features)\n', (661, 691), True, 'import torch.nn as nn\n'), ((742, 782), 'torch.nn.Linear', 'nn.Linear', (['hidden_features', 'out_features'], {}), '(hidden_features, out_features)\n', (751, 782), True, 'import torch.nn as nn\n'), ((803, 819), 'torch.nn.Dropout', 'nn.Dropout', (['drop'], {}), '(drop)\n', (813, 819), True, 'import torch.nn as nn\n'), ((1285, 1323), 'torch.nn.Linear', 'nn.Linear', (['dim', '(dim * 3)'], {'bias': 'qkv_bias'}), '(dim, dim * 3, bias=qkv_bias)\n', (1294, 1323), True, 'import torch.nn as nn\n'), ((1349, 1370), 'torch.nn.Dropout', 'nn.Dropout', (['attn_drop'], {}), '(attn_drop)\n', (1359, 1370), True, 'import torch.nn as nn\n'), ((1391, 1410), 'torch.nn.Linear', 'nn.Linear', (['dim', 'dim'], {}), '(dim, dim)\n', (1400, 1410), True, 'import torch.nn as nn\n'), ((1436, 1457), 'torch.nn.Dropout', 'nn.Dropout', (['proj_drop'], {}), '(proj_drop)\n', (1446, 1457), True, 'import torch.nn as nn\n'), ((2353, 2372), 'timm.models.layers.DropPath', 'DropPath', (['drop_path'], {}), '(drop_path)\n', (2361, 2372), False, 'from timm.models.layers import DropPath\n'), ((2396, 2409), 'torch.nn.Identity', 'nn.Identity', ([], {}), '()\n', (2407, 2409), True, 'import torch.nn as nn\n'), ((3239, 3272), 'torch.FloatTensor', 'torch.FloatTensor', (['sinusoid_table'], {}), '(sinusoid_table)\n', (3256, 3272), False, 'import torch\n'), ((2917, 2958), 'numpy.power', 'np.power', (['(10000)', '(2 * (hid_j // 2) / d_hid)'], {}), '(10000, 2 * (hid_j // 2) / d_hid)\n', (2925, 2958), True, 'import numpy as np\n')] |
import argparse
import numpy as np
import cv2
from skimage import transform as trans
import tensorflow as tf
import os
import skimage.io as io
import sys
from tqdm import tqdm
import align.detect_face as detect_face
# Transform grey image to RGB image
def to_rgb(img):
    """Expand a single-channel (H, W) uint8 image to (H, W, 3) by replicating the channel."""
    rows, cols = img.shape
    rgb = np.empty((rows, cols, 3), dtype=np.uint8)
    for channel in range(3):
        rgb[:, :, channel] = img
    return rgb
# Align face as ArcFace template
def preprocess(img, landmark):
    """Warp a face image to the 112x112 ArcFace crop via a similarity transform.

    landmark: (5, 2) array of keypoints (eyes, nose tip, mouth corners).
    """
    out_h, out_w = 112, 112
    # Canonical ArcFace 5-point template for a 112x112 crop.
    template = np.array(
        [[38.2946, 51.6963],
         [73.5318, 51.5014],
         [56.0252, 71.7366],
         [41.5493, 92.3655],
         [70.7299, 92.2041]], dtype=np.float32)
    # Estimate the similarity transform mapping detected landmarks to the template.
    tform = trans.SimilarityTransform()
    tform.estimate(landmark.astype(np.float32), template)
    affine = tform.params[0:2, :]
    return cv2.warpAffine(img, affine, (out_w, out_h), borderValue=0.0)
def main(args):
    """Detect a face with MTCNN in each input image and save a 112x112
    ArcFace-aligned crop under args.output_dir (mirroring the entry name).

    NOTE(review): `images` collects the top-level entry names of
    args.input_dir (the per-file inner loop is commented out), yet each
    entry is passed to io.imread as if it were an image file -- confirm
    the expected dataset layout against the caller.
    """
    # MTCNN: build the three cascaded detector networks in one TF session.
    with tf.Graph().as_default():
        sess = tf.Session()
        with sess.as_default():
            pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
    # Per-stage detection score thresholds and image-pyramid scale factor.
    threshold = [ 0.6, 0.7, 0.7 ]
    factor = 0.709
    # Output dirs creation
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    images = []
    for path in sorted(os.listdir(args.input_dir)):
        if not os.path.exists(os.path.join(args.output_dir,path)):
            os.mkdir(os.path.join(args.output_dir,path))
        images.append(path)
        # for name in sorted(os.listdir(os.path.join(args.input_dir,path))):
        #     images.append(os.path.join(path,name))
    # Alignment procedure
    for path in tqdm(images):
        img = io.imread(os.path.join(args.input_dir,path))
        if img.ndim == 2:
            # Grey-scale input: replicate the single channel to RGB.
            img = to_rgb(img)
        # Drop any alpha channel.
        img = img[:,:,0:3]
        # Minimum detectable face size: a fifth of the smaller side, capped at 80 px.
        _minsize = min(min(img.shape[0]//5, img.shape[1]//5),80)
        bounding_boxes, points = detect_face.detect_face(img, _minsize, pnet, rnet, onet, threshold, factor)
        if bounding_boxes.size>0:
            bindex = -1
            nrof_faces = bounding_boxes.shape[0]
            if nrof_faces>0:
                det = bounding_boxes[:,0:4]
                img_size = np.asarray(img.shape)[0:2]
                bindex = 0
                if nrof_faces>1:
                    # Several detections: keep the face that is both large and
                    # close to the image center (size minus 2x squared offset).
                    bounding_box_size = (det[:,2]-det[:,0])*(det[:,3]-det[:,1])
                    img_center = img_size / 2
                    offsets = np.vstack([ (det[:,0]+det[:,2])/2-img_center[1], (det[:,1]+det[:,3])/2-img_center[0] ])
                    offset_dist_squared = np.sum(np.power(offsets,2.0),0)
                    bindex = np.argmax(bounding_box_size-offset_dist_squared*2.0)
            # Reshape the flat landmark vector into (5, 2) rows -- presumably
            # 5 x-coordinates then 5 y-coordinates, per the MTCNN convention.
            points = points[:, bindex]
            landmark = points.reshape((2,5)).T
            warped = preprocess(img, landmark)
            io.imsave(os.path.join(args.output_dir,path), warped)
        else:
            print(path+' was skipped')
def parse_arguments(argv):
    """Parse the command line: two positional directory arguments."""
    parser = argparse.ArgumentParser()
    for name, help_text in (
        ('input_dir', 'Directory with unaligned images.'),
        ('output_dir', 'Directory for aligned face thumbnails.'),
    ):
        parser.add_argument(name, type=str, help=help_text)
    return parser.parse_args(argv)
if __name__ == '__main__':
    # CLI entry point: parse the two directory arguments and run the alignment.
    main(parse_arguments(sys.argv[1:]))
| [
"tqdm.tqdm",
"argparse.ArgumentParser",
"os.makedirs",
"align.detect_face.detect_face",
"align.detect_face.create_mtcnn",
"numpy.empty",
"numpy.asarray",
"numpy.argmax",
"tensorflow.Session",
"os.path.exists",
"numpy.power",
"skimage.transform.SimilarityTransform",
"cv2.warpAffine",
"numpy... | [((303, 338), 'numpy.empty', 'np.empty', (['(w, h, 3)'], {'dtype': 'np.uint8'}), '((w, h, 3), dtype=np.uint8)\n', (311, 338), True, 'import numpy as np\n'), ((510, 643), 'numpy.array', 'np.array', (['[[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366], [41.5493, \n 92.3655], [70.7299, 92.2041]]'], {'dtype': 'np.float32'}), '([[38.2946, 51.6963], [73.5318, 51.5014], [56.0252, 71.7366], [\n 41.5493, 92.3655], [70.7299, 92.2041]], dtype=np.float32)\n', (518, 643), True, 'import numpy as np\n'), ((701, 728), 'skimage.transform.SimilarityTransform', 'trans.SimilarityTransform', ([], {}), '()\n', (726, 728), True, 'from skimage import transform as trans\n'), ((800, 871), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'M', '(image_size[1], image_size[0])'], {'borderValue': '(0.0)'}), '(img, M, (image_size[1], image_size[0]), borderValue=0.0)\n', (814, 871), False, 'import cv2\n'), ((1551, 1563), 'tqdm.tqdm', 'tqdm', (['images'], {}), '(images)\n', (1555, 1563), False, 'from tqdm import tqdm\n'), ((2627, 2652), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2650, 2652), False, 'import argparse\n'), ((956, 968), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (966, 968), True, 'import tensorflow as tf\n'), ((1141, 1172), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (1155, 1172), False, 'import os\n'), ((1176, 1204), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {}), '(args.output_dir)\n', (1187, 1204), False, 'import os\n'), ((1238, 1264), 'os.listdir', 'os.listdir', (['args.input_dir'], {}), '(args.input_dir)\n', (1248, 1264), False, 'import os\n'), ((1766, 1841), 'align.detect_face.detect_face', 'detect_face.detect_face', (['img', '_minsize', 'pnet', 'rnet', 'onet', 'threshold', 'factor'], {}), '(img, _minsize, pnet, rnet, onet, threshold, factor)\n', (1789, 1841), True, 'import align.detect_face as detect_face\n'), ((1017, 1053), 'align.detect_face.create_mtcnn', 
'detect_face.create_mtcnn', (['sess', 'None'], {}), '(sess, None)\n', (1041, 1053), True, 'import align.detect_face as detect_face\n'), ((1583, 1617), 'os.path.join', 'os.path.join', (['args.input_dir', 'path'], {}), '(args.input_dir, path)\n', (1595, 1617), False, 'import os\n'), ((922, 932), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (930, 932), True, 'import tensorflow as tf\n'), ((1291, 1326), 'os.path.join', 'os.path.join', (['args.output_dir', 'path'], {}), '(args.output_dir, path)\n', (1303, 1326), False, 'import os\n'), ((1340, 1375), 'os.path.join', 'os.path.join', (['args.output_dir', 'path'], {}), '(args.output_dir, path)\n', (1352, 1375), False, 'import os\n'), ((2503, 2538), 'os.path.join', 'os.path.join', (['args.output_dir', 'path'], {}), '(args.output_dir, path)\n', (2515, 2538), False, 'import os\n'), ((1992, 2013), 'numpy.asarray', 'np.asarray', (['img.shape'], {}), '(img.shape)\n', (2002, 2013), True, 'import numpy as np\n'), ((2170, 2275), 'numpy.vstack', 'np.vstack', (['[(det[:, 0] + det[:, 2]) / 2 - img_center[1], (det[:, 1] + det[:, 3]) / 2 -\n img_center[0]]'], {}), '([(det[:, 0] + det[:, 2]) / 2 - img_center[1], (det[:, 1] + det[:,\n 3]) / 2 - img_center[0]])\n', (2179, 2275), True, 'import numpy as np\n'), ((2331, 2387), 'numpy.argmax', 'np.argmax', (['(bounding_box_size - offset_dist_squared * 2.0)'], {}), '(bounding_box_size - offset_dist_squared * 2.0)\n', (2340, 2387), True, 'import numpy as np\n'), ((2292, 2314), 'numpy.power', 'np.power', (['offsets', '(2.0)'], {}), '(offsets, 2.0)\n', (2300, 2314), True, 'import numpy as np\n')] |
import numpy as np
import numbers
import warnings
class ExtendedQuadratic():
    """
    A python object that represents the extended quadratic function
    f(x) = (1/2) x.T P x + q.T x + (1/2) r
    +
    {
        0 if Fx+g=0
        +infty otherwise
    }
    Note the constant term of f is r/2 (see __call__), which matters for
    scalar addition in __add__.
    """
    def __init__(self, P, q, r, F=None, g=None):
        """
        Initialize an extended quadratic function by supplying:
        - P: (n,n) numpy array, quadratic coefficient
        - q: (n,) numpy array, linear coefficient
        - r: number, twice the constant term (f contributes r/2)
        - F: (p,n) numpy array, equality-constraint matrix (optional)
        - g: (p,) numpy array, equality-constraint offset (optional)
        """
        self.P = P
        self.q = q.flatten()
        self.r = r
        # check shapes of P,q,r
        assert len(self.P.shape) == 2, "P has the wrong dimensions"
        assert len(self.q.shape) == 1, "q has the wrong dimensions"
        n, m = self.P.shape
        assert n == m, "P is not square"
        assert self.q.shape[0] == n, "q is the wrong shape"
        assert isinstance(self.r, numbers.Real), "r must be number"
        # check shapes of F,g
        if F is not None and g is not None:
            assert len(F.shape) == 2, "F has the wrong dimensions"
            assert len(g.shape) == 1, "g has the wrong dimensions"
            assert F.shape[1] == n, "F is wrong shape"
            p = F.shape[0]
            assert g is not None and g.shape[0] == p, "g is wrong shape"
            self.F = F
            self.g = g
        else:
            # No constraints: empty (0, n) system, so self.p == 0.
            self.F = np.empty((0, n))
            self.g = np.empty(0)

    def __repr__(self):
        """
        Return a human-readable dump of the coefficients and constraints.
        """
        coefs = '\nP: ' + str(self.P) + '\n' + 'q: ' + \
            str(self.q) + '\n' + 'r: ' + str(self.r)
        eq = '\n F: ' + str(self.F) + '\ng: ' + str(self.g)
        return coefs + '\n' + eq

    @property
    def n(self):
        # Dimension of the argument x.
        return self.P.shape[0]

    @property
    def p(self):
        # Number of equality constraints.
        return self.F.shape[0]

    @property
    def convex(self):
        # f is convex iff P is PSD on the null space of F (spanned by V2).
        if self.n == 0:
            return True
        _, _, V2 = self.reduced_form()
        return np.all(
            np.greater_equal(
                np.linalg.eigvals(
                    V2.T@self.P@V2
                ), -1e-8
            )
        )

    def __call__(self, x=None):
        """Evaluate f at x; +inf when the equality constraints are violated."""
        n = self.n
        if n == 0:
            return .5 * self.r
        else:
            assert x is not None, "Must supply argument"
            assert len(x.shape) == 1, "x has wrong dimensions"
            assert x.shape[0] == n, "x has wrong shape"
            if self.p == 0:
                satisfies_equality_constraints = True
            else:
                satisfies_equality_constraints = np.allclose(
                    np.dot(self.F, x) + self.g, 0)
            if not satisfies_equality_constraints:
                return float("inf")
            else:
                return .5 * x@self.P@x + self.q@x + .5 * self.r

    def reduced_form(self):
        """
        Returns:
            -x0: particular solution to Fx+g=0
            -V1: first part of SVD (range of F.T)
            -V2: second part of SVD (null space of F)
        Side effect: normalizes self.F/self.g to a minimal full-rank form.
        """
        if self.p == 0:
            return np.zeros(self.n), np.empty((self.n, 0)), np.eye(self.n)
        U, S, Vt = np.linalg.svd(self.F, full_matrices=True)
        rank = np.linalg.matrix_rank(self.F)
        U1 = U[:, :rank]
        V1 = Vt[:rank].T
        V2 = Vt[rank:].T
        Sigma = S[:rank]
        x0 = -V1@np.diag(1. / Sigma)@U1.T@self.g
        if not np.allclose(self.F@x0 + self.g, 0):
            # Fx+g=0 is infeasible, so the function is improper.
            warnings.warn("Not proper")
        self.F = V1.T
        self.g = np.diag(1. / S[:rank]) @ U1.T @ self.g
        return x0, V1, V2

    def __add__(f, g):
        """
        h(x) = f(x) + g(x)
        """
        if isinstance(g, numbers.Real):
            # BUGFIX: adding a constant only shifts the constant term of f.
            # Since f's constant term is r/2, add 2*g to r; the previous code
            # added g elementwise to P and q as well, changing the function.
            h = ExtendedQuadratic(
                f.P,
                f.q,
                f.r + 2 * g,
                f.F,
                f.g
            )
        else:
            # Sum of quadratics: coefficients add, constraints stack.
            Fnew = np.r_[f.F, g.F]
            gnew = np.r_[f.g, g.g]
            h = ExtendedQuadratic(
                f.P + g.P,
                f.q + g.q,
                f.r + g.r,
                Fnew,
                gnew
            )
            h.reduced_form()
        return h

    def __mul__(f, a):
        """
        h(x) = a*f(x)
        """
        # Constraints are invariant under scaling, so F, g are unchanged.
        h = ExtendedQuadratic(a * f.P, a * f.q, a * f.r, f.F, f.g)
        return h

    def __div__(f, a):
        """
        h(x) = f(x)/a
        """
        return (1. / a) * f

    def __truediv__(f, a):
        """
        h(x) = f(x)/a
        """
        return (1. / a) * f

    def __rmul__(f, a):
        return f.__mul__(a)

    def __rdiv__(f, a):
        return f.__div__(a)

    def __eq__(f, g):
        # Equality up to a small numerical tolerance.
        return f.distance(g) <= 1e-8

    def affine_composition(f, A, b, reduced_form=True):
        """
        h(x) = f(Ax+b).
        """
        h = ExtendedQuadratic(
            A.T@f.P@A,
            A.T@f.P@b + A.T@f.q,
            b@f.P@b + 2 * f.q@b + f.r,
            f.F@A,
            f.F@b + f.g
        )
        if h.p > 0 and reduced_form:
            h.reduced_form()
        return h

    def distance(f, g):
        """d(f,g): 0 iff f and g agree on their (shared) constraint set."""
        if not f.equality_constraints_equal(g):
            return float("inf")
        x0, _, V2 = f.reduced_form()
        # Compare the quadratic, linear, and constant parts restricted to the
        # affine feasible set {x0 + V2 z}.
        metric = np.linalg.norm(V2.T@(f.P - g.P)@V2, ord='fro')**2 + \
            2 * np.linalg.norm(V2.T@(f.P@x0 + f.q - g.P@x0 - g.q), ord=2)**2 + \
            (x0.T @ (f.P@x0 + 2 * f.q - g.P@x0 - 2 * g.q) + f.r - g.r)**2
        return metric

    def equality_constraints_equal(f, g):
        """True when both functions have the same feasible affine set."""
        x0, V1, V2 = f.reduced_form()
        x0_tilde, V1_tilde, V2_tilde = g.reduced_form()
        c1 = np.allclose(V1_tilde.T@V2, 0)
        c2 = np.allclose(V1.T@V2_tilde, 0)
        c3 = np.allclose(f.F@x0_tilde + f.g, 0)
        c4 = np.allclose(g.F@x0 + g.g, 0)
        return c1 and c2 and c3 and c4

    def convex_indices(self, indices):
        """Check (convex, strictly_convex) of f restricted to the given coordinates."""
        assert min(indices) >= 0 and max(indices) < self.n, "Invalid indices"
        _, _, V2 = self.reduced_form()
        # BUGFIX: use builtin `bool`; the np.bool alias was removed in NumPy 1.24.
        u_mask = np.zeros(self.n, bool)
        u_mask[indices] = True
        P_uu = np.atleast_2d(self.P[u_mask, :][:, u_mask])
        V2 = V2[u_mask, :]
        if ((V2.T@P_uu@V2).shape == (0, 0)):
            return True, True
        min_eigval = np.min(np.linalg.eigvals(V2.T@P_uu@V2))
        strictly_convex = min_eigval > 0
        convex = min_eigval >= -1e-8
        return convex, strictly_convex

    def partial_minimization(self, indices):
        """
        Optimal value of optimization problem
        minimize_u f(x,u)
        is a convex quadratic. Returns the new quadratic and (A,b) where u=Ax+b.
        indices is a subset of {1,...,n+m} to minimize
        """
        convex, strictly_convex = self.convex_indices(indices)
        assert convex, "not extended quadratic because not convex"
        n_u = len(indices)
        n_x = self.n - n_u
        # BUGFIX: use builtin `bool`; the np.bool alias was removed in NumPy 1.24.
        u_mask = np.zeros(self.n, bool)
        u_mask[indices] = True
        x_mask = ~u_mask
        q_u = self.q[u_mask]
        P_ux = np.atleast_2d(self.P[u_mask, :][:, x_mask])
        P_uu = np.atleast_2d(self.P[u_mask, :][:, u_mask])
        g = self.g
        F_x = self.F[:, x_mask]
        F_u = self.F[:, u_mask]
        F_u_pinv = np.linalg.pinv(F_u)
        # KKT system of the inner minimization over u.
        KKT_matrix = np.r_[
            np.c_[P_uu, F_u.T],
            np.c_[F_u, np.zeros((self.p, self.p))]
        ]
        KKT_matrix_pinv = np.linalg.pinv(KKT_matrix)
        # Constraints induced on x after eliminating u.
        Ft = (np.eye(F_u.shape[0]) - F_u@F_u_pinv)@F_x
        gt = (np.eye(F_u.shape[0]) - F_u@F_u_pinv)@g
        Ap = np.r_[P_ux, F_x]
        bp = np.r_[q_u, g]
        if n_x > 0 and not strictly_convex:
            # Without strict convexity, verify the KKT right-hand side lies in
            # the range of the KKT matrix so the minimum is attained.
            temp = ExtendedQuadratic(
                np.zeros((n_x, n_x)), np.zeros(n_x), 0, Ft, gt)
            x_0, V1, V2 = temp.reduced_form()
            Rhs = np.c_[
                Ap@V2, Ap@x_0 + bp
            ]
            assert np.allclose(
                (np.eye(KKT_matrix.shape[0]) - KKT_matrix@KKT_matrix_pinv)@Rhs,
                0
            ), "not extended quadratic because range constraint does not hold"
        # Assemble the affine map (x,u) = A x + b with u = argmin.
        A = np.zeros((self.n, n_x))
        A[x_mask, :] = np.eye(n_x)
        b = np.zeros(self.n)
        b[x_mask] = np.zeros(n_x)
        res = -np.c_[np.eye(n_u), np.zeros((n_u, self.p))] @ \
            KKT_matrix_pinv @ \
            np.c_[Ap, bp]
        A[u_mask, :] = res[:, :-1]
        b[u_mask] = res[:, -1]
        f = ExtendedQuadratic(self.P, self.q, self.r)
        f = f.affine_composition(A, b)
        f.F = Ft
        f.g = gt
        return f, A[u_mask, :], b[u_mask]
def dp_infinite(sample, num_iterations, N, gamma=1):
    """
    Approximate infinite-horizon dynamic programming: run the finite-horizon
    recursion for `num_iterations` backward steps with a zero terminal cost,
    discounting the stage costs by gamma**t.

    Arguments:
        - sample(N): batch sampler returning (A, B, c, g, Pi) as in dp_finite
        - num_iterations: number of backward steps to run
        - N: number of monte carlo iterations per step
        - gamma: per-step discount factor
    Returns the time-0 slices (V, Q, policy), each a length-K list.
    """
    # Probe one sample just to discover the number of modes K and state dim n.
    A, B, c, g, Pi = sample(1)
    _, K, n, _ = A.shape
    # Zero terminal cost for every mode.
    terminal_cost = [ExtendedQuadratic(np.zeros((n, n)), np.zeros(n), 0)
                     for _ in range(K)]

    def sample_time_invariant(t, N):
        # Time-invariant dynamics; only the stage cost is discounted.
        A, B, c, g, Pi = sample(N)
        return A, B, c, (gamma**t) * g, Pi

    Vs, Qs, policies = dp_finite(sample_time_invariant, terminal_cost,
                                 num_iterations, N)
    return Vs[0], Qs[0], policies[0]
def dp_finite(sample, g_T, T, N):
    """
    Finite-horizon dynamic programming recursion.

    Arguments:
        - sample(t, N): batch sampler returning
            - A_t: (N,K,n,n) numpy array
            - B_t: (N,K,n,m) numpy array
            - c_t: (N,K,n) numpy array
            - g_t: length-N list of length-K list of ExtendedQuadratics
            - Pi_t: (K,K) numpy array
        - g_T: length-K list of ExtendedQuadratics (terminal cost)
        - T: horizon length
        - N: number of monte carlo iterations
    Returns (Vs, Qs, policies): cost-to-go functions (length T+1),
    state-action cost-to-go functions (length T), and affine policies
    (length T, each entry a (matrix, vector) pair), all indexed as
    Vs[t][s] / Qs[t][s] / policies[t][s].
    """
    value_fns = [[] for _ in range(T + 1)]
    q_fns = [[] for _ in range(T)]
    policies = [[] for _ in range(T)]
    value_fns[T] = g_T
    # Backward recursion from t = T-1 down to 0.
    for t in reversed(range(T)):
        q_fns[t], n, m, K = get_qs(sample, value_fns[t + 1], N, t)
        for q_fn in q_fns[t]:
            # Minimize over the control block u (coordinates n..n+m-1).
            v_fn, gain, offset = q_fn.partial_minimization(np.arange(n, n + m))
            value_fns[t].append(v_fn)
            policies[t].append((gain, offset))
    return value_fns, q_fns, policies
def get_qs(sample, V, N, t):
    """Monte-Carlo estimate of the per-mode state-action cost-to-go at time t.

    Averages, over N sampled realizations, the stage cost plus the expected
    next-step cost-to-go composed with the sampled dynamics x+ = [A B](x,u)+c.
    Returns (Qs, n, m, K).
    """
    A, B, c, g, Pi = sample(t, N)
    _, _, _, m = B.shape
    _, K, n, _ = A.shape
    q_fns = []
    for s in range(K):
        acc = ExtendedQuadratic(np.zeros((n + m, n + m)), np.zeros(n + m), 0)
        for k in range(N):
            acc += g[k][s] / N
            for sprime in range(K):
                # Expected next-step cost: transition weight times V composed
                # with the sampled affine dynamics.
                successor = V[sprime].affine_composition(
                    np.c_[A[k][s], B[k][s]], c[k][s])
                acc += Pi[sprime, s] / N * successor
        q_fns.append(acc)
    return q_fns, n, m, K
def dp_finite_mpi(sample, g_T, T, N, comm):
    """
    Arguments:
    - sample(t): function that gives a batch sample of
        - A_t: (N,K,n,n) numpy array
        - B_t: (N,K,n,m) numpy array
        - c_t: (N,K,n) numpy array
        - g_t: length-N list of length-K list of ExtendedQuadratics
        - Pi_t: (K,K) numpy array
    - g_T: list of length-K list of ExtendedQuadratics
    - T: horizon length
    - N: number of monte carlo iterations
    - comm: MPI communicator (mpi4py style); rank 0 coordinates and is the
      only rank that returns results.
    This function performs the dynamic programming recursion described in the paper [].
    It returns an length T+1 list of length-K list of ExtendedQuadratics representing the cost-to-go functions.
    It also returns a length T list of length-K list of ExtendedQuadratics represneting the state-action cost-to-go functions.
    It also returns a length T list of length-K list of policies, where each policy is a matrix+vector representing an affine function.
    e.g. Vs[t][s] or Qs[t][s] or policies[t][s]
    """
    # initialize the cost-to-go functions and policies
    nprocs = comm.Get_size()
    myrank = comm.Get_rank()
    # Each worker runs its own share of the Monte Carlo samples.
    N_per_proc = int(N // nprocs) + 1
    if myrank == 0:
        Vs = [[] for _ in range(T + 1)]
        Qs = [[] for _ in range(T)]
        policies = [[] for _ in range(T)]
        Vs[-1] = g_T
    # backward recursion
    for t in range(T)[::-1]:
        # Rank 0 broadcasts (via scatter) the per-worker workload and the
        # current cost-to-go functions; every rank gets the same V.
        if myrank == 0:
            data = [(N_per_proc, Vs[t + 1])] * (nprocs)
        else:
            data = None
        N, V = comm.scatter(data, root=0)
        Qs_scattered, n, m, K = get_qs(sample, V, N, t)
        data = comm.gather(Qs_scattered, root=0)
        if myrank == 0:
            # Average the per-worker Q estimates, then minimize over u.
            for s in range(K):
                Q = ExtendedQuadratic(
                    np.zeros((n + m, n + m)), np.zeros(n + m), 0)
                for d in data:
                    Q += d[s] / nprocs
                Qs[t].append(Q)
                V, policy_A, policy_b = Q.partial_minimization(
                    np.arange(n, n + m))
                Vs[t].append(V)
                policies[t].append((policy_A, policy_b))
    # Only the root rank holds the assembled results.
    if myrank == 0:
        return Vs, Qs, policies
    else:
        return None
| [
"numpy.linalg.eigvals",
"numpy.empty",
"numpy.allclose",
"numpy.zeros",
"numpy.linalg.svd",
"numpy.linalg.matrix_rank",
"numpy.arange",
"numpy.linalg.norm",
"numpy.dot",
"numpy.eye",
"numpy.linalg.pinv",
"warnings.warn",
"numpy.diag",
"numpy.atleast_2d"
] | [((3165, 3206), 'numpy.linalg.svd', 'np.linalg.svd', (['self.F'], {'full_matrices': '(True)'}), '(self.F, full_matrices=True)\n', (3178, 3206), True, 'import numpy as np\n'), ((3222, 3251), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['self.F'], {}), '(self.F)\n', (3243, 3251), True, 'import numpy as np\n'), ((5636, 5667), 'numpy.allclose', 'np.allclose', (['(V1_tilde.T @ V2)', '(0)'], {}), '(V1_tilde.T @ V2, 0)\n', (5647, 5667), True, 'import numpy as np\n'), ((5679, 5710), 'numpy.allclose', 'np.allclose', (['(V1.T @ V2_tilde)', '(0)'], {}), '(V1.T @ V2_tilde, 0)\n', (5690, 5710), True, 'import numpy as np\n'), ((5722, 5758), 'numpy.allclose', 'np.allclose', (['(f.F @ x0_tilde + f.g)', '(0)'], {}), '(f.F @ x0_tilde + f.g, 0)\n', (5733, 5758), True, 'import numpy as np\n'), ((5770, 5800), 'numpy.allclose', 'np.allclose', (['(g.F @ x0 + g.g)', '(0)'], {}), '(g.F @ x0 + g.g, 0)\n', (5781, 5800), True, 'import numpy as np\n'), ((6014, 6039), 'numpy.zeros', 'np.zeros', (['self.n', 'np.bool'], {}), '(self.n, np.bool)\n', (6022, 6039), True, 'import numpy as np\n'), ((6086, 6129), 'numpy.atleast_2d', 'np.atleast_2d', (['self.P[u_mask, :][:, u_mask]'], {}), '(self.P[u_mask, :][:, u_mask])\n', (6099, 6129), True, 'import numpy as np\n'), ((6898, 6923), 'numpy.zeros', 'np.zeros', (['self.n', 'np.bool'], {}), '(self.n, np.bool)\n', (6906, 6923), True, 'import numpy as np\n'), ((7025, 7068), 'numpy.atleast_2d', 'np.atleast_2d', (['self.P[u_mask, :][:, x_mask]'], {}), '(self.P[u_mask, :][:, x_mask])\n', (7038, 7068), True, 'import numpy as np\n'), ((7084, 7127), 'numpy.atleast_2d', 'np.atleast_2d', (['self.P[u_mask, :][:, u_mask]'], {}), '(self.P[u_mask, :][:, u_mask])\n', (7097, 7127), True, 'import numpy as np\n'), ((7230, 7249), 'numpy.linalg.pinv', 'np.linalg.pinv', (['F_u'], {}), '(F_u)\n', (7244, 7249), True, 'import numpy as np\n'), ((7398, 7424), 'numpy.linalg.pinv', 'np.linalg.pinv', (['KKT_matrix'], {}), '(KKT_matrix)\n', (7412, 7424), True, 'import 
numpy as np\n'), ((8082, 8105), 'numpy.zeros', 'np.zeros', (['(self.n, n_x)'], {}), '((self.n, n_x))\n', (8090, 8105), True, 'import numpy as np\n'), ((8129, 8140), 'numpy.eye', 'np.eye', (['n_x'], {}), '(n_x)\n', (8135, 8140), True, 'import numpy as np\n'), ((8154, 8170), 'numpy.zeros', 'np.zeros', (['self.n'], {}), '(self.n)\n', (8162, 8170), True, 'import numpy as np\n'), ((8191, 8204), 'numpy.zeros', 'np.zeros', (['n_x'], {}), '(n_x)\n', (8199, 8204), True, 'import numpy as np\n'), ((1452, 1468), 'numpy.empty', 'np.empty', (['(0, n)'], {}), '((0, n))\n', (1460, 1468), True, 'import numpy as np\n'), ((1490, 1501), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (1498, 1501), True, 'import numpy as np\n'), ((3419, 3455), 'numpy.allclose', 'np.allclose', (['(self.F @ x0 + self.g)', '(0)'], {}), '(self.F @ x0 + self.g, 0)\n', (3430, 3455), True, 'import numpy as np\n'), ((3467, 3494), 'warnings.warn', 'warnings.warn', (['"""Not proper"""'], {}), "('Not proper')\n", (3480, 3494), False, 'import warnings\n'), ((6261, 6296), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['(V2.T @ P_uu @ V2)'], {}), '(V2.T @ P_uu @ V2)\n', (6278, 6296), True, 'import numpy as np\n'), ((9669, 9685), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (9677, 9685), True, 'import numpy as np\n'), ((9687, 9698), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (9695, 9698), True, 'import numpy as np\n'), ((11655, 11679), 'numpy.zeros', 'np.zeros', (['(n + m, n + m)'], {}), '((n + m, n + m))\n', (11663, 11679), True, 'import numpy as np\n'), ((11681, 11696), 'numpy.zeros', 'np.zeros', (['(n + m)'], {}), '(n + m)\n', (11689, 11696), True, 'import numpy as np\n'), ((2115, 2152), 'numpy.linalg.eigvals', 'np.linalg.eigvals', (['(V2.T @ self.P @ V2)'], {}), '(V2.T @ self.P @ V2)\n', (2132, 2152), True, 'import numpy as np\n'), ((3089, 3105), 'numpy.zeros', 'np.zeros', (['self.n'], {}), '(self.n)\n', (3097, 3105), True, 'import numpy as np\n'), ((3107, 3128), 'numpy.empty', 'np.empty', 
(['(self.n, 0)'], {}), '((self.n, 0))\n', (3115, 3128), True, 'import numpy as np\n'), ((3130, 3144), 'numpy.eye', 'np.eye', (['self.n'], {}), '(self.n)\n', (3136, 3144), True, 'import numpy as np\n'), ((3535, 3558), 'numpy.diag', 'np.diag', (['(1.0 / S[:rank])'], {}), '(1.0 / S[:rank])\n', (3542, 3558), True, 'import numpy as np\n'), ((7440, 7460), 'numpy.eye', 'np.eye', (['F_u.shape[0]'], {}), '(F_u.shape[0])\n', (7446, 7460), True, 'import numpy as np\n'), ((7495, 7515), 'numpy.eye', 'np.eye', (['F_u.shape[0]'], {}), '(F_u.shape[0])\n', (7501, 7515), True, 'import numpy as np\n'), ((7691, 7711), 'numpy.zeros', 'np.zeros', (['(n_x, n_x)'], {}), '((n_x, n_x))\n', (7699, 7711), True, 'import numpy as np\n'), ((7713, 7726), 'numpy.zeros', 'np.zeros', (['n_x'], {}), '(n_x)\n', (7721, 7726), True, 'import numpy as np\n'), ((11345, 11364), 'numpy.arange', 'np.arange', (['n', '(n + m)'], {}), '(n, n + m)\n', (11354, 11364), True, 'import numpy as np\n'), ((2673, 2690), 'numpy.dot', 'np.dot', (['self.F', 'x'], {}), '(self.F, x)\n', (2679, 2690), True, 'import numpy as np\n'), ((3371, 3391), 'numpy.diag', 'np.diag', (['(1.0 / Sigma)'], {}), '(1.0 / Sigma)\n', (3378, 3391), True, 'import numpy as np\n'), ((5253, 5303), 'numpy.linalg.norm', 'np.linalg.norm', (['(V2.T @ (f.P - g.P) @ V2)'], {'ord': '"""fro"""'}), "(V2.T @ (f.P - g.P) @ V2, ord='fro')\n", (5267, 5303), True, 'import numpy as np\n'), ((13750, 13774), 'numpy.zeros', 'np.zeros', (['(n + m, n + m)'], {}), '((n + m, n + m))\n', (13758, 13774), True, 'import numpy as np\n'), ((13776, 13791), 'numpy.zeros', 'np.zeros', (['(n + m)'], {}), '(n + m)\n', (13784, 13791), True, 'import numpy as np\n'), ((13982, 14001), 'numpy.arange', 'np.arange', (['n', '(n + m)'], {}), '(n, n + m)\n', (13991, 14001), True, 'import numpy as np\n'), ((5323, 5386), 'numpy.linalg.norm', 'np.linalg.norm', (['(V2.T @ (f.P @ x0 + f.q - g.P @ x0 - g.q))'], {'ord': '(2)'}), '(V2.T @ (f.P @ x0 + f.q - g.P @ x0 - g.q), ord=2)\n', (5337, 5386), 
True, 'import numpy as np\n'), ((7334, 7360), 'numpy.zeros', 'np.zeros', (['(self.p, self.p)'], {}), '((self.p, self.p))\n', (7342, 7360), True, 'import numpy as np\n'), ((7909, 7936), 'numpy.eye', 'np.eye', (['KKT_matrix.shape[0]'], {}), '(KKT_matrix.shape[0])\n', (7915, 7936), True, 'import numpy as np\n'), ((8227, 8238), 'numpy.eye', 'np.eye', (['n_u'], {}), '(n_u)\n', (8233, 8238), True, 'import numpy as np\n'), ((8240, 8263), 'numpy.zeros', 'np.zeros', (['(n_u, self.p)'], {}), '((n_u, self.p))\n', (8248, 8263), True, 'import numpy as np\n')] |
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import SimpleITK as sitk
import numpy as np
import shutil
from batchgenerators.utilities.file_and_folder_operations import *
from multiprocessing import Pool
from collections import OrderedDict
def create_nonzero_mask(data):
from scipy.ndimage import binary_fill_holes
assert len(data.shape) == 4 or len(data.shape) == 3, "data must have shape (C, X, Y, Z) or shape (C, X, Y)"
nonzero_mask = np.zeros(data.shape[1:], dtype=bool)
for c in range(data.shape[0]):
this_mask = data[c] != 0
nonzero_mask = nonzero_mask | this_mask
nonzero_mask = binary_fill_holes(nonzero_mask)
return nonzero_mask
def get_bbox_from_mask(mask, outside_value=0):
mask_voxel_coords = np.where(mask != outside_value)
minzidx = int(np.min(mask_voxel_coords[0]))
maxzidx = int(np.max(mask_voxel_coords[0])) + 1
minxidx = int(np.min(mask_voxel_coords[1]))
maxxidx = int(np.max(mask_voxel_coords[1])) + 1
minyidx = int(np.min(mask_voxel_coords[2]))
maxyidx = int(np.max(mask_voxel_coords[2])) + 1
return [[minzidx, maxzidx], [minxidx, maxxidx], [minyidx, maxyidx]]
def crop_to_bbox(image, bbox):
assert len(image.shape) == 3, "only supports 3d images"
resizer = (slice(bbox[0][0], bbox[0][1]), slice(bbox[1][0], bbox[1][1]), slice(bbox[2][0], bbox[2][1]))
return image[resizer]
def get_case_identifier(case):
case_identifier = case[0].split("/")[-1].split(".nii.gz")[0][:-5]
return case_identifier
def get_case_identifier_from_npz(case):
case_identifier = case.split("/")[-1][:-4]
return case_identifier
def load_case_from_list_of_files(data_files, seg_file=None):
assert isinstance(data_files, list) or isinstance(data_files, tuple), "case must be either a list or a tuple"
properties = OrderedDict()
data_itk = [sitk.ReadImage(f) for f in data_files]
properties["original_size_of_raw_data"] = np.array(data_itk[0].GetSize())[[2, 1, 0]]
properties["original_spacing"] = np.array(data_itk[0].GetSpacing())[[2, 1, 0]]
properties["list_of_data_files"] = data_files
properties["seg_file"] = seg_file
properties["itk_origin"] = data_itk[0].GetOrigin()
properties["itk_spacing"] = data_itk[0].GetSpacing()
properties["itk_direction"] = data_itk[0].GetDirection()
data_npy = np.vstack([sitk.GetArrayFromImage(d)[None] for d in data_itk])
if seg_file is not None:
seg_itk = sitk.ReadImage(seg_file)
seg_npy = sitk.GetArrayFromImage(seg_itk)[None].astype(np.float32)
else:
seg_npy = None
return data_npy.astype(np.float32), seg_npy, properties
def crop_to_nonzero(data, seg=None, nonzero_label=-1):
    """
    :param data:
    :param seg:
    :param nonzero_label: this will be written into the segmentation map
    :return:
    """
    nonzero_mask = create_nonzero_mask(data)
    bbox = get_bbox_from_mask(nonzero_mask, 0)

    # Crop every channel to the shared nonzero bounding box.
    data = np.vstack([crop_to_bbox(data[ch], bbox)[None] for ch in range(data.shape[0])])
    if seg is not None:
        seg = np.vstack([crop_to_bbox(seg[ch], bbox)[None] for ch in range(seg.shape[0])])

    nonzero_mask = crop_to_bbox(nonzero_mask, bbox)[None]
    if seg is not None:
        # Mark background voxels (label 0 outside the nonzero region) with nonzero_label.
        seg[(seg == 0) & (nonzero_mask == 0)] = nonzero_label
    else:
        # No segmentation given: synthesize one that is nonzero_label outside the
        # object and 0 inside it.
        seg = nonzero_mask.astype(int)
        seg[seg == 0] = nonzero_label
        seg[seg > 0] = 0
    return data, seg, bbox
def get_patient_identifiers_from_cropped_files(folder):
    """List case identifiers for all .npz files in ``folder`` (basename minus ".npz")."""
    identifiers = []
    for npz_path in subfiles(folder, join=True, suffix=".npz"):
        identifiers.append(npz_path.split("/")[-1][:-4])
    return identifiers
class ImageCropper(object):
    """Crops cases to the bounding box of their joint nonzero region and saves each
    case as a single .npz (image + seg stacked) plus a .pkl of metadata."""
    def __init__(self, num_threads, output_folder=None):
        """
        This one finds a mask of nonzero elements (must be nonzero in all modalities) and crops the image to that mask.
        In the case of BRaTS and ISLES data this results in a significant reduction in image size
        :param num_threads: number of worker processes used by run_cropping
        :param output_folder: where to store the cropped data
        """
        self.output_folder = output_folder
        self.num_threads = num_threads
        if self.output_folder is not None:
            maybe_mkdir_p(self.output_folder)
    @staticmethod
    def crop(data, properties, seg=None):
        """Crop data/seg to the nonzero bounding box; records crop metadata into
        ``properties`` (mutated in place and also returned)."""
        shape_before = data.shape
        data, seg, bbox = crop_to_nonzero(data, seg, nonzero_label=-1)
        shape_after = data.shape
        print("before crop:", shape_before, "after crop:", shape_after, "spacing:",
              np.array(properties["original_spacing"]), "\n")
        properties["crop_bbox"] = bbox
        properties['classes'] = np.unique(seg)
        # -1 is the nonzero_label; anything below it is out of range -> reset to background
        seg[seg < -1] = 0
        properties["size_after_cropping"] = data[0].shape
        return data, seg, properties
    @staticmethod
    def crop_from_list_of_files(data_files, seg_file=None):
        """Load a case from disk and crop it (see ``crop``)."""
        data, seg, properties = load_case_from_list_of_files(data_files, seg_file)
        return ImageCropper.crop(data, properties, seg)
    def load_crop_save(self, case, case_identifier, overwrite_existing=False):
        """Crop one case and persist it as <case_identifier>.npz/.pkl.
        Skips work when both output files already exist unless overwrite_existing."""
        try:
            print(case_identifier)
            if overwrite_existing \
                    or (not os.path.isfile(os.path.join(self.output_folder, "%s.npz" % case_identifier))
                        or not os.path.isfile(os.path.join(self.output_folder, "%s.pkl" % case_identifier))):
                # case is [modality_0, ..., modality_n, seg] -> images vs segmentation
                data, seg, properties = self.crop_from_list_of_files(case[:-1], case[-1])
                all_data = np.vstack((data, seg))
                np.savez_compressed(os.path.join(self.output_folder, "%s.npz" % case_identifier), data=all_data)
                with open(os.path.join(self.output_folder, "%s.pkl" % case_identifier), 'wb') as f:
                    pickle.dump(properties, f)
        except Exception as e:
            # Re-raise so a failed worker surfaces in Pool.starmap; print for context first.
            print("Exception in", case_identifier, ":")
            print(e)
            raise e
    def get_list_of_cropped_files(self):
        """Full paths of all cropped .npz files in the output folder."""
        return subfiles(self.output_folder, join=True, suffix=".npz")
    def get_patient_identifiers_from_cropped_files(self):
        """Case identifiers (basename minus ".npz") of all cropped files."""
        return [i.split("/")[-1][:-4] for i in self.get_list_of_cropped_files()]
    def run_cropping(self, list_of_files, overwrite_existing=False, output_folder=None):
        """
        also copied ground truth nifti segmentation into the preprocessed folder so that we can use them for evaluation
        on the cluster
        :param list_of_files: list of list of files [[PATIENTID_TIMESTEP_0000.nii.gz], [PATIENTID_TIMESTEP_0000.nii.gz]]
        :param overwrite_existing:
        :param output_folder:
        :return:
        """
        if output_folder is not None:
            self.output_folder = output_folder
        output_folder_gt = os.path.join(self.output_folder, "gt_segmentations")
        maybe_mkdir_p(output_folder_gt)
        # Copy ground-truth segmentations (last entry of each case) for later evaluation.
        for j, case in enumerate(list_of_files):
            if case[-1] is not None:
                shutil.copy(case[-1], output_folder_gt)
        list_of_args = []
        for j, case in enumerate(list_of_files):
            case_identifier = get_case_identifier(case)
            list_of_args.append((case, case_identifier, overwrite_existing))
        # Crop all cases in parallel.
        p = Pool(self.num_threads)
        p.starmap(self.load_crop_save, list_of_args)
        p.close()
        p.join()
    def load_properties(self, case_identifier):
        """Load the pickled metadata dict for one cropped case."""
        with open(os.path.join(self.output_folder, "%s.pkl" % case_identifier), 'rb') as f:
            properties = pickle.load(f)
        return properties
    def save_properties(self, case_identifier, properties):
        """Overwrite the pickled metadata dict for one cropped case."""
        with open(os.path.join(self.output_folder, "%s.pkl" % case_identifier), 'wb') as f:
            pickle.dump(properties, f)
| [
"SimpleITK.ReadImage",
"scipy.ndimage.binary_fill_holes",
"numpy.zeros",
"numpy.unique",
"SimpleITK.GetArrayFromImage",
"shutil.copy",
"numpy.min",
"numpy.where",
"numpy.max",
"numpy.array",
"multiprocessing.Pool",
"collections.OrderedDict",
"numpy.vstack"
] | [((1120, 1156), 'numpy.zeros', 'np.zeros', (['data.shape[1:]'], {'dtype': 'bool'}), '(data.shape[1:], dtype=bool)\n', (1128, 1156), True, 'import numpy as np\n'), ((1296, 1327), 'scipy.ndimage.binary_fill_holes', 'binary_fill_holes', (['nonzero_mask'], {}), '(nonzero_mask)\n', (1313, 1327), False, 'from scipy.ndimage import binary_fill_holes\n'), ((1430, 1461), 'numpy.where', 'np.where', (['(mask != outside_value)'], {}), '(mask != outside_value)\n', (1438, 1461), True, 'import numpy as np\n'), ((2529, 2542), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2540, 2542), False, 'from collections import OrderedDict\n'), ((3831, 3854), 'numpy.vstack', 'np.vstack', (['cropped_data'], {}), '(cropped_data)\n', (3840, 3854), True, 'import numpy as np\n'), ((1481, 1509), 'numpy.min', 'np.min', (['mask_voxel_coords[0]'], {}), '(mask_voxel_coords[0])\n', (1487, 1509), True, 'import numpy as np\n'), ((1583, 1611), 'numpy.min', 'np.min', (['mask_voxel_coords[1]'], {}), '(mask_voxel_coords[1])\n', (1589, 1611), True, 'import numpy as np\n'), ((1685, 1713), 'numpy.min', 'np.min', (['mask_voxel_coords[2]'], {}), '(mask_voxel_coords[2])\n', (1691, 1713), True, 'import numpy as np\n'), ((2560, 2577), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['f'], {}), '(f)\n', (2574, 2577), True, 'import SimpleITK as sitk\n'), ((3173, 3197), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['seg_file'], {}), '(seg_file)\n', (3187, 3197), True, 'import SimpleITK as sitk\n'), ((4059, 4081), 'numpy.vstack', 'np.vstack', (['cropped_seg'], {}), '(cropped_seg)\n', (4068, 4081), True, 'import numpy as np\n'), ((5662, 5676), 'numpy.unique', 'np.unique', (['seg'], {}), '(seg)\n', (5671, 5676), True, 'import numpy as np\n'), ((8264, 8286), 'multiprocessing.Pool', 'Pool', (['self.num_threads'], {}), '(self.num_threads)\n', (8268, 8286), False, 'from multiprocessing import Pool\n'), ((1530, 1558), 'numpy.max', 'np.max', (['mask_voxel_coords[0]'], {}), '(mask_voxel_coords[0])\n', (1536, 1558), True, 
'import numpy as np\n'), ((1632, 1660), 'numpy.max', 'np.max', (['mask_voxel_coords[1]'], {}), '(mask_voxel_coords[1])\n', (1638, 1660), True, 'import numpy as np\n'), ((1734, 1762), 'numpy.max', 'np.max', (['mask_voxel_coords[2]'], {}), '(mask_voxel_coords[2])\n', (1740, 1762), True, 'import numpy as np\n'), ((5539, 5579), 'numpy.array', 'np.array', (["properties['original_spacing']"], {}), "(properties['original_spacing'])\n", (5547, 5579), True, 'import numpy as np\n'), ((3072, 3097), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['d'], {}), '(d)\n', (3094, 3097), True, 'import SimpleITK as sitk\n'), ((6533, 6555), 'numpy.vstack', 'np.vstack', (['(data, seg)'], {}), '((data, seg))\n', (6542, 6555), True, 'import numpy as np\n'), ((7995, 8034), 'shutil.copy', 'shutil.copy', (['case[-1]', 'output_folder_gt'], {}), '(case[-1], output_folder_gt)\n', (8006, 8034), False, 'import shutil\n'), ((3217, 3248), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['seg_itk'], {}), '(seg_itk)\n', (3239, 3248), True, 'import SimpleITK as sitk\n')] |
"""
Fold detector inference.
"""
import time
import argparse
from sys import argv
import numpy as np
import torch
from dataset import *
from test.model import Model
from test.utils import *
from nets.detect_net import *
def em_detector(opt):
    """Run fold-detector inference section by section and write the mask stack.

    :param opt: options namespace carrying patch_size, n_test, test_data,
                fwd_dir and output_file (see the __main__ block of this script).
    """
    # One uint8 fold mask per section, stacked along the last axis.
    fold_out = np.zeros(opt.patch_size + (opt.n_test,), dtype='uint8')

    model = load_model(opt)
    test_loader = load_data(opt.test_data, opt)

    for section in range(opt.n_test):
        tic = time.time()
        sample = test_loader()
        pred = forward(model, sample)
        mask = pred["mask"].cpu().detach().numpy()
        # Scale [0, 1] probabilities to [0, 255] for compact storage.
        fold_out[:, :, section] = (mask * 255).astype('uint8')

        elapsed = np.round(time.time() - tic, 3)
        # Log the first ten iterations, then every 50th.
        if (section + 1) % 50 == 0 or (section + 1) <= 10:
            print("Iter: " + str(section + 1) + ", elapsed time = " + str(elapsed))

    h5write(opt.fwd_dir + opt.output_file, fold_out)
if __name__ == "__main__":
    # Command-line entry point: parse arguments, build the inference options and run.
    parser = argparse.ArgumentParser()
    parser.add_argument("--exp_dir", required=True, type=str,
        help="Model path")
    parser.add_argument("--chkpt_num", required=True, type=int,
        help="Model checkpoint number")
    parser.add_argument("--input_file", required=True, type=str,
        help="Input file to detect folds")
    parser.add_argument("--output_file", required=True, type=str,
        help="Output filename")
    opt = parser.parse_args()
    # data_dir is empty: input_file is presumably an absolute path — TODO confirm.
    data_dir = ""
    TEST = Dataset(os.path.expanduser(data_dir),
        {
            "image": opt.input_file
        }
    )
    # Derived paths and inference options attached to the parsed namespace.
    opt.model_dir = opt.exp_dir +'model/'
    opt.fwd_dir = opt.exp_dir + 'forward/'
    opt.exp_name = 'EM detector inference'
    opt.test_data = TEST
    opt.mip = 0
    # Image layout: shape[1:3] is the in-plane patch size, shape[-1] the section count.
    opt.patch_size = opt.test_data.image.shape[1:3]
    opt.n_test = opt.test_data.image.shape[-1]
    opt.net = UNet()
    opt.in_spec = ["image"]
    opt.out_spec = ["mask"]
    # GPUs
    opt.gpu_ids = ["0"]
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(opt.gpu_ids)
    # Make directories.
    if not os.path.isdir(opt.fwd_dir):
        os.makedirs(opt.fwd_dir)
    # Run inference.
    print("Running inference: {}".format(opt.exp_name))
    em_detector(opt)
| [
"numpy.zeros",
"argparse.ArgumentParser",
"time.time"
] | [((273, 328), 'numpy.zeros', 'np.zeros', (['(opt.patch_size + (opt.n_test,))'], {'dtype': '"""uint8"""'}), "(opt.patch_size + (opt.n_test,), dtype='uint8')\n", (281, 328), True, 'import numpy as np\n'), ((880, 905), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (903, 905), False, 'import argparse\n'), ((466, 477), 'time.time', 'time.time', ([], {}), '()\n', (475, 477), False, 'import time\n'), ((662, 673), 'time.time', 'time.time', ([], {}), '()\n', (671, 673), False, 'import time\n')] |
# coding=utf-8
# Copyright 2018 XXX Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch XXX model. """
####################################################
# In this template, replace all the XXX (various casings) with your model name
####################################################
import logging
import os
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from .configuration_xxx import XxxConfig
from .file_utils import add_start_docstrings
from .modeling_utils import PreTrainedModel
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
####################################################
# This list contains shortcut names for some of
# the pretrained weights provided with the models
####################################################
XXX_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "xxx-base-uncased",
    "xxx-large-uncased",
]
####################################################
# This is a conversion method from TF 1.0 to PyTorch
# More details: https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28
####################################################
def load_tf_weights_in_xxx(model, config, tf_checkpoint_path):
    """ Load tf checkpoints in a pytorch model.

    Reads every variable from the TF checkpoint, maps its slash-separated scope
    name onto attributes of ``model`` and copies the values in place.
    Optimizer-state variables are skipped. Returns ``model``.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        # name becomes the list of scope components, e.g. ["bert", "encoder", "layer_0", ...]
        name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info("Skipping {}".format("/".join(name)))
            continue
        pointer = model
        for m_name in name:
            # Scopes like "layer_3" index into a ModuleList: split off the number.
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            # TF naming -> PyTorch attribute naming.
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    # NOTE(review): this `continue` advances the inner scope loop, not the
                    # outer variable loop — later scope components are still processed.
                    logger.info("Skipping {}".format("/".join(name)))
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        # m_name is the last scope component after the loop.
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            # TF stores dense kernels transposed relative to torch.nn.Linear.
            array = np.transpose(array)
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
####################################################
# PyTorch Models are constructed by sub-classing
# - torch.nn.Module for the layers and
# - PreTrainedModel for the models (itself a sub-class of torch.nn.Module)
####################################################
####################################################
# Here is an example of typical layer in a PyTorch model of the library
# The classes are usually identical to the TF 2.0 ones without the 'TF' prefix.
#
# See the conversion methods in modeling_tf_pytorch_utils.py for more details
####################################################
# Template placeholders: aliased to nn.Module so the file parses; replace them
# with real attention / intermediate / output implementations for your model.
XxxAttention = nn.Module
XxxIntermediate = nn.Module
XxxOutput = nn.Module
class XxxLayer(nn.Module):
    """One transformer block: self-attention followed by the feed-forward sub-layer."""

    def __init__(self, config):
        super().__init__()
        self.attention = XxxAttention(config)
        self.intermediate = XxxIntermediate(config)
        self.output = XxxOutput(config)

    def forward(self, hidden_states, attention_mask=None, head_mask=None):
        # Element 0 of the attention output is the hidden states; any further
        # elements are optional attention probabilities.
        attn_outputs = self.attention(hidden_states, attention_mask, head_mask)
        attn_hidden = attn_outputs[0]
        ffn_hidden = self.intermediate(attn_hidden)
        block_output = self.output(ffn_hidden, attn_hidden)
        # Re-attach the optional attention extras behind the block output.
        return (block_output,) + attn_outputs[1:]
####################################################
# PreTrainedModel is a sub-class of torch.nn.Module
# which take care of loading and saving pretrained weights
# and various common utilities.
#
# Here you just need to specify a few (self-explanatory)
# pointers for your model and the weights initialization
# method if its not fully covered by PreTrainedModel's default method
####################################################
# Template placeholders for the embedding/encoder/pooler sub-modules.
# XxxLayerNorm reuses torch.nn.LayerNorm (referenced by weight initialization).
XxxLayerNorm = torch.nn.LayerNorm
XxxEmbeddings = nn.Module
XxxEncoder = nn.Module
XxxPooler = nn.Module
class XxxPreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """

    config_class = XxxConfig
    load_tf_weights = load_tf_weights_in_xxx
    # Must match the attribute name ("transformer") the head classes use for the base model.
    base_model_prefix = "transformer"

    def _init_weights(self, module):
        """Initialize one module's weights in place."""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, XxxLayerNorm):
            # LayerNorm starts as the identity transform.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
XXX_START_DOCSTRING = r""" The XXX model was proposed in
`XXX: Pre-training of Deep Bidirectional Transformers for Language Understanding`_
by <NAME>, <NAME>, <NAME> and <NAME>. It's a bidirectional transformer
pre-trained using a combination of masked language modeling objective and next sentence prediction
on a large corpus comprising the Toronto Book Corpus and Wikipedia.
This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
refer to the PyTorch documentation for all matter related to general usage and behavior.
.. _`XXX: Pre-training of Deep Bidirectional Transformers for Language Understanding`:
https://arxiv.org/abs/1810.04805
.. _`torch.nn.Module`:
https://pytorch.org/docs/stable/nn.html#module
Parameters:
config (:class:`~transformers.XxxConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
XXX_INPUTS_DOCSTRING = r"""
Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
To match pre-training, XXX input sequence should be formatted with [CLS] and [SEP] tokens as follows:
(a) For sequence pairs:
``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1``
(b) For single sequences:
``tokens: [CLS] the dog is hairy . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0``
Xxx is a model with absolute position embeddings so it's usually advised to pad the inputs on
the right rather than the left.
Indices can be obtained using :class:`transformers.XxxTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
(see `XXX: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).
**position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
**head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
**inputs_embeds**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, embedding_dim)``:
Optionally, instead of passing ``input_ids`` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
"""
@add_start_docstrings(
    "The bare Xxx Model transformer outputting raw hidden-states without any specific head on top.",
    XXX_START_DOCSTRING,
    XXX_INPUTS_DOCSTRING,
)
class XxxModel(XxxPreTrainedModel):
    r"""
    Bare Xxx encoder without any task-specific head.

    Outputs a tuple whose first element is the last-layer hidden states of shape
    ``(batch_size, sequence_length, hidden_size)``, followed by the encoder's
    optional ``hidden_states`` and ``attentions`` extras when those are enabled
    in the configuration.

    NOTE(template): a pooler is constructed in ``__init__`` but the skeleton
    ``forward`` below does not apply it — fill this in with the real model code.

    Examples::

        tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased')
        model = XxxModel.from_pretrained('xxx-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]
    """

    def __init__(self, config):
        super().__init__(config)
        # Embeddings -> encoder stack -> pooler, BERT-style.
        self.embeddings = XxxEmbeddings(config)
        self.encoder = XxxEncoder(config)
        self.pooler = XxxPooler(config)
        self.init_weights()

    def get_input_embeddings(self):
        """Return the token embedding matrix (used for tying/resizing)."""
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, new_embeddings):
        """Replace the token embedding matrix."""
        self.embeddings.word_embeddings = new_embeddings

    def _prune_heads(self, heads_to_prune):
        """Prune attention heads; ``heads_to_prune`` maps layer index -> list of heads.
        See base class PreTrainedModel.
        """
        for layer_idx, head_list in heads_to_prune.items():
            self.encoder.layer[layer_idx].attention.prune_heads(head_list)

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
    ):
        # Exactly one of input_ids / inputs_embeds must be supplied.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        if input_ids is not None:
            input_shape = input_ids.size()
            device = input_ids.device
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            device = inputs_embeds.device
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        # Defaults: attend to every position, single segment.
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # Broadcastable additive attention mask and per-layer head mask
        # (helpers inherited from PreTrainedModel).
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        ##################################
        # Replace this with your model code
        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(embedding_output, extended_attention_mask, head_mask=head_mask)
        sequence_output = encoder_outputs[0]
        # Prepend the last hidden state; keep (hidden_states, attentions) extras if present.
        return (sequence_output,) + encoder_outputs[1:]
@add_start_docstrings(
    """Xxx Model with a `language modeling` head on top. """, XXX_START_DOCSTRING, XXX_INPUTS_DOCSTRING
)
class XxxForMaskedLM(XxxPreTrainedModel):
    r"""
    **masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
        Labels for computing the masked language modeling loss.
        Indices should be in ``[-100, 0, ..., config.vocab_size - 1]`` (see ``input_ids`` docstring).
        Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed
        for the tokens with labels in ``[0, ..., config.vocab_size - 1]``.

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Masked language modeling loss.
        **prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **hidden_states** / **attentions**: optional extras forwarded from the base model.

    Examples::

        tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased')
        model = XxxForMaskedLM.from_pretrained('xxx-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, masked_lm_labels=input_ids)
        loss, prediction_scores = outputs[:2]
    """

    def __init__(self, config):
        super().__init__(config)
        self.transformer = XxxModel(config)
        # NOTE(review): the other heads in this file size their layers with
        # config.hidden_size; ``config.n_embd`` is GPT-2-style naming — confirm
        # XxxConfig actually defines it.
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size)
        self.init_weights()

    def get_output_embeddings(self):
        """Return the LM head so its weights can be tied to the input embeddings."""
        return self.lm_head

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        masked_lm_labels=None,
    ):
        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        sequence_output = outputs[0]
        # Bug fix: the head is stored as ``self.lm_head`` (see __init__ and
        # get_output_embeddings); the template erroneously called the
        # non-existent ``self.cls`` here, which would raise AttributeError.
        prediction_scores = self.lm_head(sequence_output)

        outputs = (prediction_scores,) + outputs[2:]  # Add hidden states and attention if they are here
        if masked_lm_labels is not None:
            # CrossEntropyLoss's default ignore_index (-100) masks out unlabeled tokens.
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
            outputs = (masked_lm_loss,) + outputs
        return outputs  # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
@add_start_docstrings(
    """Xxx Model transformer with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    XXX_START_DOCSTRING,
    XXX_INPUTS_DOCSTRING,
)
class XxxForSequenceClassification(XxxPreTrainedModel):
    r"""
    Sequence-level classification/regression head on the pooled Xxx output.

    **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``,
    indices in ``[0, ..., config.num_labels - 1]``. With ``config.num_labels == 1``
    a mean-square (regression) loss is computed, otherwise cross-entropy.

    Outputs ``((loss,) logits, (hidden_states), (attentions))`` where loss is only
    present when ``labels`` is given.

    Examples::

        tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased')
        model = XxxForSequenceClassification.from_pretrained('xxx-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        labels = torch.tensor([1]).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, logits = outputs[:2]
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.transformer = XxxModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        transformer_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # Element 1 is taken as the pooled representation (the template assumes
        # the filled-in base model returns it there).
        pooled = self.dropout(transformer_outputs[1])
        logits = self.classifier(pooled)

        outputs = (logits,) + transformer_outputs[2:]
        if labels is not None:
            if self.num_labels == 1:
                # Regression on a single output value.
                loss = MSELoss()(logits.view(-1), labels.view(-1))
            else:
                loss = CrossEntropyLoss()(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs
        return outputs  # (loss), logits, (hidden_states), (attentions)
@add_start_docstrings(
    """Xxx Model with a token classification head on top (a linear layer on top of
    the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
    XXX_START_DOCSTRING,
    XXX_INPUTS_DOCSTRING,
)
class XxxForTokenClassification(XxxPreTrainedModel):
    r"""
    Per-token classification head (e.g. NER) on top of the Xxx hidden states.

    **labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``,
    indices in ``[0, ..., config.num_labels - 1]``.

    Outputs ``((loss,) scores, (hidden_states), (attentions))`` where loss is only
    present when ``labels`` is given; ``scores`` has shape
    ``(batch_size, sequence_length, config.num_labels)``.

    Examples::

        tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased')
        model = XxxForTokenClassification.from_pretrained('xxx-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids, labels=labels)
        loss, scores = outputs[:2]
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.transformer = XxxModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        self.init_weights()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        transformer_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        hidden = self.dropout(transformer_outputs[0])
        logits = self.classifier(hidden)

        outputs = (logits,) + transformer_outputs[2:]
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            if attention_mask is not None:
                # Restrict the loss to non-padded positions.
                active = attention_mask.view(-1) == 1
                loss = loss_fct(logits.view(-1, self.num_labels)[active], labels.view(-1)[active])
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            outputs = (loss,) + outputs
        return outputs  # (loss), scores, (hidden_states), (attentions)
@add_start_docstrings(
    """Xxx Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
    the hidden-states output to compute `span start logits` and `span end logits`). """,
    XXX_START_DOCSTRING,
    XXX_INPUTS_DOCSTRING,
)
class XxxForQuestionAnswering(XxxPreTrainedModel):
    r"""
    **start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
        Labels for position (index) of the start of the labelled span for computing the token classification loss.
        Positions are clamped to the length of the sequence (`sequence_length`).
        Position outside of the sequence are not taken into account for computing the loss.
    **end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
        Labels for position (index) of the end of the labelled span for computing the token classification loss.
        Positions are clamped to the length of the sequence (`sequence_length`).
        Position outside of the sequence are not taken into account for computing the loss.
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
        **start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
            Span-start scores (before SoftMax).
        **end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
            Span-end scores (before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``output_attentions=True``)
            list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
    Examples::
        tokenizer = XxxTokenizer.from_pretrained('xxx-base-uncased')
        model = XxxForQuestionAnswering.from_pretrained('xxx-large-uncased-whole-word-masking-finetuned-squad')
        question, text = "Who was <NAME>?", "<NAME> was a nice puppet"
        input_text = "[CLS] " + question + " [SEP] " + text + " [SEP]"
        input_ids = tokenizer.encode(input_text)
        token_type_ids = [0 if i <= input_ids.index(102) else 1 for i in range(len(input_ids))]
        start_scores, end_scores = model(torch.tensor([input_ids]), token_type_ids=torch.tensor([token_type_ids]))
        all_tokens = tokenizer.convert_ids_to_tokens(input_ids)
        print(' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1]))
        # a nice puppet
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        # Backbone encoder plus a linear head producing start/end span logits.
        self.transformer = XxxModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
        self.init_weights()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        start_positions=None,
        end_positions=None,
    ):
        transformer_outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        span_logits = self.qa_outputs(transformer_outputs[0])
        start_logits, end_logits = span_logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1)
        end_logits = end_logits.squeeze(-1)
        outputs = (start_logits, end_logits,) + transformer_outputs[2:]
        if start_positions is None or end_positions is None:
            return outputs  # start_logits, end_logits, (hidden_states), (attentions)
        # On multi-GPU the position tensors may carry an extra trailing dim.
        if len(start_positions.size()) > 1:
            start_positions = start_positions.squeeze(-1)
        if len(end_positions.size()) > 1:
            end_positions = end_positions.squeeze(-1)
        # Positions outside the sequence are clamped to an ignored index so
        # they do not contribute to the loss.
        ignored_index = start_logits.size(1)
        start_positions.clamp_(0, ignored_index)
        end_positions.clamp_(0, ignored_index)
        loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
        start_loss = loss_fct(start_logits, start_positions)
        end_loss = loss_fct(end_logits, end_positions)
        total_loss = (start_loss + end_loss) / 2
        return (total_loss,) + outputs  # loss, start_logits, end_logits, ...
| [
"torch.nn.Dropout",
"torch.ones",
"os.path.abspath",
"re.fullmatch",
"re.split",
"torch.nn.MSELoss",
"tensorflow.train.list_variables",
"tensorflow.train.load_variable",
"torch.nn.CrossEntropyLoss",
"numpy.transpose",
"torch.nn.Linear",
"torch.zeros",
"logging.getLogger",
"torch.from_numpy... | [((1054, 1081), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1071, 1081), False, 'import logging\n'), ((2097, 2132), 'os.path.abspath', 'os.path.abspath', (['tf_checkpoint_path'], {}), '(tf_checkpoint_path)\n', (2112, 2132), False, 'import os\n'), ((2258, 2290), 'tensorflow.train.list_variables', 'tf.train.list_variables', (['tf_path'], {}), '(tf_path)\n', (2281, 2290), True, 'import tensorflow as tf\n'), ((2450, 2487), 'tensorflow.train.load_variable', 'tf.train.load_variable', (['tf_path', 'name'], {}), '(tf_path, name)\n', (2472, 2487), True, 'import tensorflow as tf\n'), ((4483, 4506), 'torch.from_numpy', 'torch.from_numpy', (['array'], {}), '(array)\n', (4499, 4506), False, 'import torch\n'), ((18861, 18904), 'torch.nn.Linear', 'nn.Linear', (['config.n_embd', 'config.vocab_size'], {}), '(config.n_embd, config.vocab_size)\n', (18870, 18904), False, 'from torch import nn\n'), ((22541, 22579), 'torch.nn.Dropout', 'nn.Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (22551, 22579), False, 'from torch import nn\n'), ((22606, 22659), 'torch.nn.Linear', 'nn.Linear', (['config.hidden_size', 'self.config.num_labels'], {}), '(config.hidden_size, self.config.num_labels)\n', (22615, 22659), False, 'from torch import nn\n'), ((26161, 26199), 'torch.nn.Dropout', 'nn.Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (26171, 26199), False, 'from torch import nn\n'), ((26226, 26274), 'torch.nn.Linear', 'nn.Linear', (['config.hidden_size', 'config.num_labels'], {}), '(config.hidden_size, config.num_labels)\n', (26235, 26274), False, 'from torch import nn\n'), ((31086, 31134), 'torch.nn.Linear', 'nn.Linear', (['config.hidden_size', 'config.num_labels'], {}), '(config.hidden_size, config.num_labels)\n', (31095, 31134), False, 'from torch import nn\n'), ((3090, 3128), 're.fullmatch', 're.fullmatch', (['"""[A-Za-z]+_\\\\d+"""', 'm_name'], {}), 
"('[A-Za-z]+_\\\\d+', m_name)\n", (3102, 3128), False, 'import re\n'), ((15235, 15273), 'torch.ones', 'torch.ones', (['input_shape'], {'device': 'device'}), '(input_shape, device=device)\n', (15245, 15273), False, 'import torch\n'), ((15338, 15395), 'torch.zeros', 'torch.zeros', (['input_shape'], {'dtype': 'torch.long', 'device': 'device'}), '(input_shape, dtype=torch.long, device=device)\n', (15349, 15395), False, 'import torch\n'), ((19762, 19780), 'torch.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {}), '()\n', (19778, 19780), False, 'from torch.nn import CrossEntropyLoss, MSELoss\n'), ((27088, 27106), 'torch.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {}), '()\n', (27104, 27106), False, 'from torch.nn import CrossEntropyLoss, MSELoss\n'), ((32613, 32657), 'torch.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {'ignore_index': 'ignored_index'}), '(ignore_index=ignored_index)\n', (32629, 32657), False, 'from torch.nn import CrossEntropyLoss, MSELoss\n'), ((3160, 3187), 're.split', 're.split', (['"""_(\\\\d+)"""', 'm_name'], {}), "('_(\\\\d+)', m_name)\n", (3168, 3187), False, 'import re\n'), ((4209, 4228), 'numpy.transpose', 'np.transpose', (['array'], {}), '(array)\n', (4221, 4228), True, 'import numpy as np\n'), ((23550, 23559), 'torch.nn.MSELoss', 'MSELoss', ([], {}), '()\n', (23557, 23559), False, 'from torch.nn import CrossEntropyLoss, MSELoss\n'), ((23671, 23689), 'torch.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {}), '()\n', (23687, 23689), False, 'from torch.nn import CrossEntropyLoss, MSELoss\n')] |
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Commonly used utility functions."""
import re
import copy
import warnings
from collections.abc import Iterable
import numpy as np
from scipy.spatial.distance import pdist, squareform
from astropy.time import Time
from astropy.coordinates import Angle
from astropy.utils import iers
from . import _utils
def _str_to_bytes(s):
warnings.warn(
"_str_to_bytes is deprecated and will be removed in pyuvdata version 2.2. "
"For an input string s, this function is a thin wrapper on s.encode('utf8'). "
"The use of encode is preferred over calling this function.",
DeprecationWarning,
)
return s.encode("utf8")
def _bytes_to_str(b):
warnings.warn(
"_bytes_to_str is deprecated and will be removed in pyuvdata version 2.2. "
"For an input string s, this function is a thin wrapper on s.decode('utf8'). "
"The use of decode is preferred over calling this function.",
DeprecationWarning,
)
return b.decode("utf8")
# Public API of this module.
__all__ = [
    "POL_STR2NUM_DICT",
    "POL_NUM2STR_DICT",
    "CONJ_POL_DICT",
    "JONES_STR2NUM_DICT",
    "JONES_NUM2STR_DICT",
    "LatLonAlt_from_XYZ",
    "XYZ_from_LatLonAlt",
    "rotECEF_from_ECEF",
    "ECEF_from_rotECEF",
    "ENU_from_ECEF",
    "ECEF_from_ENU",
    "phase_uvw",
    "unphase_uvw",
    "uvcalibrate",
    "apply_uvflag",
    "get_lst_for_time",
    "polstr2num",
    "polnum2str",
    "jstr2num",
    "jnum2str",
    "parse_polstr",
    "parse_jpolstr",
    "conj_pol",
    "reorder_conj_pols",
    "baseline_to_antnums",
    "antnums_to_baseline",
    "baseline_index_flip",
    "get_baseline_redundancies",
    "get_antenna_redundancies",
    "collapse",
    "mean_collapse",
    "absmean_collapse",
    "quadmean_collapse",
    "or_collapse",
    "and_collapse",
]
# fmt: off
# polarization constants
# maps polarization strings to polarization integers
POL_STR2NUM_DICT = {"pI": 1, "pQ": 2, "pU": 3, "pV": 4,
                    "I": 1, "Q": 2, "U": 3, "V": 4,  # support straight stokes names
                    "rr": -1, "ll": -2, "rl": -3, "lr": -4,
                    "xx": -5, "yy": -6, "xy": -7, "yx": -8}
# maps polarization integers to polarization strings
POL_NUM2STR_DICT = {1: "pI", 2: "pQ", 3: "pU", 4: "pV",
                    -1: "rr", -2: "ll", -3: "rl", -4: "lr",
                    -5: "xx", -6: "yy", -7: "xy", -8: "yx"}
# maps how polarizations change when antennas are swapped
CONJ_POL_DICT = {"xx": "xx", "yy": "yy", "xy": "yx", "yx": "xy",
                 "ee": "ee", "nn": "nn", "en": "ne", "ne": "en",
                 "rr": "rr", "ll": "ll", "rl": "lr", "lr": "rl",
                 "I": "I", "Q": "Q", "U": "U", "V": "V",
                 "pI": "pI", "pQ": "pQ", "pU": "pU", "pV": "pV"}
# maps jones matrix element strings to jones integers
# Add entries that don't start with "J" to allow shorthand versions
JONES_STR2NUM_DICT = {"Jxx": -5, "Jyy": -6, "Jxy": -7, "Jyx": -8,
                      "xx": -5, "x": -5, "yy": -6, "y": -6, "xy": -7, "yx": -8,
                      "Jrr": -1, "Jll": -2, "Jrl": -3, "Jlr": -4,
                      "rr": -1, "r": -1, "ll": -2, "l": -2, "rl": -3, "lr": -4}
# maps jones integers to jones matrix element strings
JONES_NUM2STR_DICT = {-1: "Jrr", -2: "Jll", -3: "Jrl", -4: "Jlr",
                      -5: "Jxx", -6: "Jyy", -7: "Jxy", -8: "Jyx"}
# maps uvdata pols to input feed polarizations
POL_TO_FEED_DICT = {"xx": ["x", "x"], "yy": ["y", "y"],
                    "xy": ["x", "y"], "yx": ["y", "x"],
                    "ee": ["e", "e"], "nn": ["n", "n"],
                    "en": ["e", "n"], "ne": ["n", "e"],
                    "rr": ["r", "r"], "ll": ["l", "l"],
                    "rl": ["r", "l"], "lr": ["l", "r"]}
# fmt: on
def _get_iterable(x):
"""Return iterable version of input."""
if isinstance(x, Iterable):
return x
else:
return (x,)
def _fits_gethduaxis(hdu, axis):
"""
Make axis arrays for fits files.
Parameters
----------
hdu : astropy.io.fits HDU object
The HDU to make an axis array for.
axis : int
The axis number of interest (1-based).
Returns
-------
ndarray of float
Array of values for the specified axis.
"""
ax = str(axis)
axis_num = hdu.header["NAXIS" + ax]
val = hdu.header["CRVAL" + ax]
delta = hdu.header["CDELT" + ax]
index = hdu.header["CRPIX" + ax] - 1
return delta * (np.arange(axis_num) - index) + val
def _fits_indexhdus(hdulist):
"""
Get a dict of table names and HDU numbers from a FITS HDU list.
Parameters
----------
hdulist : list of astropy.io.fits HDU objects
List of HDUs to get names for
Returns
-------
dict
dictionary with table names as keys and HDU number as values.
"""
tablenames = {}
for i in range(len(hdulist)):
try:
tablenames[hdulist[i].header["EXTNAME"]] = i
except (KeyError):
continue
return tablenames
def _get_fits_extra_keywords(header, keywords_to_skip=None):
"""
Get any extra keywords and return as dict.
Parameters
----------
header : FITS header object
header object to get extra_keywords from.
keywords_to_skip : list of str
list of keywords to not include in extra keywords in addition to standard
FITS keywords.
Returns
-------
dict
dict of extra keywords.
"""
# List standard FITS header items that are still should not be included in
# extra_keywords
# These are the beginnings of FITS keywords to ignore, the actual keywords
# often include integers following these names (e.g. NAXIS1, CTYPE3)
std_fits_substrings = [
"HISTORY",
"SIMPLE",
"BITPIX",
"EXTEND",
"BLOCKED",
"GROUPS",
"PCOUNT",
"BSCALE",
"BZERO",
"NAXIS",
"PTYPE",
"PSCAL",
"PZERO",
"CTYPE",
"CRVAL",
"CRPIX",
"CDELT",
"CROTA",
"CUNIT",
]
if keywords_to_skip is not None:
std_fits_substrings.extend(keywords_to_skip)
extra_keywords = {}
# find all the other header items and keep them as extra_keywords
for key in header:
# check if key contains any of the standard FITS substrings
if np.any([sub in key for sub in std_fits_substrings]):
continue
if key == "COMMENT":
extra_keywords[key] = str(header.get(key))
elif key != "":
extra_keywords[key] = header.get(key)
return extra_keywords
def _check_history_version(history, version_string):
"""Check if version_string is present in history string."""
if version_string.replace(" ", "") in history.replace("\n", "").replace(" ", ""):
return True
else:
return False
def _check_histories(history1, history2):
"""Check if two histories are the same."""
if history1.replace("\n", "").replace(" ", "") == history2.replace(
"\n", ""
).replace(" ", ""):
return True
else:
return False
def _combine_histories(history1, history2):
    """Combine histories with minimal repeats."""
    # Walk history2 word by word; whenever a word not already present in
    # history1 is found, append it and greedily consume the following novel
    # words as part of the same run.
    hist2_words = history2.split(" ")
    add_hist = ""
    # Pad with spaces so substring tests match whole words only.
    test_hist1 = " " + history1 + " "
    for i, word in enumerate(hist2_words):
        if " " + word + " " not in test_hist1:
            add_hist += " " + word
            # NOTE: consumed words are deleted from hist2_words while it is
            # being enumerated — intentional, so the outer loop does not
            # revisit words already appended as part of this run.
            keep_going = i + 1 < len(hist2_words)
            while keep_going:
                if (hist2_words[i + 1] == " ") or (
                    " " + hist2_words[i + 1] + " " not in test_hist1
                ):
                    add_hist += " " + hist2_words[i + 1]
                    del hist2_words[i + 1]
                    keep_going = i + 1 < len(hist2_words)
                else:
                    keep_going = False
    return history1 + add_hist
def baseline_to_antnums(baseline, Nants_telescope):
    """
    Get the antenna numbers corresponding to a given baseline number.

    Parameters
    ----------
    baseline : int or array_like of ints
        baseline number
    Nants_telescope : int
        number of antennas

    Returns
    -------
    int or array_like of int
        first antenna number(s)
    int or array_like of int
        second antenna number(s)
    """
    # The packing scheme only supports up to 2048 antennas.
    if Nants_telescope > 2048:
        raise Exception(
            "error Nants={Nants}>2048 not supported".format(Nants=Nants_telescope)
        )
    baseline_arr = np.ascontiguousarray(baseline, dtype=np.int64)
    ant1, ant2 = _utils.baseline_to_antnums(baseline_arr)
    if isinstance(baseline, (np.ndarray, list, tuple)):
        # array-like in, arrays out
        return ant1, ant2
    # scalar in, scalars out
    return ant1.item(0), ant2.item(0)
def antnums_to_baseline(ant1, ant2, Nants_telescope, attempt256=False):
    """
    Get the baseline number corresponding to two given antenna numbers.

    Parameters
    ----------
    ant1 : int or array_like of int
        first antenna number
    ant2 : int or array_like of int
        second antenna number
    Nants_telescope : int
        number of antennas
    attempt256 : bool
        Option to try to use the older 256 standard used in
        many uvfits files (will use 2048 standard if there are more
        than 256 antennas). Default is False.

    Returns
    -------
    int or array of int
        baseline number corresponding to the two antenna numbers.
    """
    # The packing scheme only supports up to 2048 antennas.
    if Nants_telescope is not None and Nants_telescope > 2048:
        raise Exception(
            "cannot convert ant1, ant2 to a baseline index "
            "with Nants={Nants}>2048.".format(Nants=Nants_telescope)
        )
    baseline = _utils.antnums_to_baseline(
        np.ascontiguousarray(ant1, dtype=np.int64),
        np.ascontiguousarray(ant2, dtype=np.int64),
        attempt256=attempt256,
    )
    if isinstance(ant1, (np.ndarray, list, tuple)):
        # array-like in, array out
        return baseline
    # scalar in, scalar out
    return baseline.item(0)
def baseline_index_flip(baseline, Nants_telescope):
    """Change baseline number to reverse antenna order."""
    antnums = baseline_to_antnums(baseline, Nants_telescope)
    # Re-pack with the antenna order swapped.
    return antnums_to_baseline(antnums[1], antnums[0], Nants_telescope)
def _x_orientation_rep_dict(x_orientation):
"""Create replacement dict based on x_orientation."""
if x_orientation.lower() == "east" or x_orientation.lower() == "e":
return {"x": "e", "y": "n"}
elif x_orientation.lower() == "north" or x_orientation.lower() == "n":
return {"x": "n", "y": "e"}
else:
raise ValueError("x_orientation not recognized.")
def polstr2num(pol, x_orientation=None):
    """
    Convert polarization str to number according to AIPS Memo 117.

    Prefer 'pI', 'pQ', 'pU' and 'pV' to make it clear that these are pseudo-Stokes,
    not true Stokes, but also supports 'I', 'Q', 'U', 'V'.

    Parameters
    ----------
    pol : str
        polarization string
    x_orientation : str, optional
        Orientation of the physical dipole corresponding to what is
        labelled as the x polarization ("east" or "north") to allow for
        converting from E/N strings.

    Returns
    -------
    int
        Number corresponding to string

    Raises
    ------
    ValueError
        If the pol string cannot be converted to a polarization number.

    Warns
    -----
    UserWarning
        If the x_orientation not recognized.
    """
    dict_use = copy.deepcopy(POL_STR2NUM_DICT)
    if x_orientation is not None:
        try:
            rep_dict = _x_orientation_rep_dict(x_orientation)
        except ValueError:
            warnings.warn("x_orientation not recognized.")
        else:
            # Also accept e/n aliases of the x/y keys.
            for key, value in POL_STR2NUM_DICT.items():
                alias = key.replace("x", rep_dict["x"]).replace("y", rep_dict["y"])
                dict_use[alias] = value

    poldict = {key.lower(): num for key, num in dict_use.items()}
    if isinstance(pol, str):
        return poldict[pol.lower()]
    if isinstance(pol, Iterable):
        return [poldict[key.lower()] for key in pol]
    raise ValueError(
        "Polarization {p} cannot be converted to a polarization number.".format(
            p=pol
        )
    )
def polnum2str(num, x_orientation=None):
    """
    Convert polarization number to str according to AIPS Memo 117.

    Uses 'pI', 'pQ', 'pU' and 'pV' to make it clear that these are pseudo-Stokes,
    not true Stokes

    Parameters
    ----------
    num : int
        polarization number
    x_orientation : str, optional
        Orientation of the physical dipole corresponding to what is
        labelled as the x polarization ("east" or "north") to convert to
        E/N strings.

    Returns
    -------
    str
        String corresponding to polarization number

    Raises
    ------
    ValueError
        If the polarization number cannot be converted to a polarization string.

    Warns
    -----
    UserWarning
        If the x_orientation not recognized.
    """
    dict_use = copy.deepcopy(POL_NUM2STR_DICT)
    if x_orientation is not None:
        try:
            rep_dict = _x_orientation_rep_dict(x_orientation)
        except ValueError:
            warnings.warn("x_orientation not recognized.")
        else:
            # Rewrite x/y in the output strings as e/n equivalents.
            for key, value in POL_NUM2STR_DICT.items():
                dict_use[key] = value.replace("x", rep_dict["x"]).replace(
                    "y", rep_dict["y"]
                )

    if isinstance(num, (int, np.int32, np.int64)):
        return dict_use[num]
    if isinstance(num, Iterable):
        return [dict_use[i] for i in num]
    raise ValueError(
        "Polarization {p} cannot be converted to string.".format(p=num)
    )
def jstr2num(jstr, x_orientation=None):
    """
    Convert jones polarization str to number according to calfits memo.

    Parameters
    ----------
    jstr : str
        antenna (jones) polarization string
    x_orientation : str, optional
        Orientation of the physical dipole corresponding to what is
        labelled as the x polarization ("east" or "north") to allow for
        converting from E/N strings.

    Returns
    -------
    int
        antenna (jones) polarization number corresponding to string

    Raises
    ------
    ValueError
        If the jones string cannot be converted to a polarization number.

    Warns
    -----
    UserWarning
        If the x_orientation not recognized.
    """
    dict_use = copy.deepcopy(JONES_STR2NUM_DICT)
    if x_orientation is not None:
        try:
            rep_dict = _x_orientation_rep_dict(x_orientation)
        except ValueError:
            warnings.warn("x_orientation not recognized.")
        else:
            # Also accept e/n aliases of the x/y keys.
            for key, value in JONES_STR2NUM_DICT.items():
                alias = key.replace("x", rep_dict["x"]).replace("y", rep_dict["y"])
                dict_use[alias] = value

    jdict = {key.lower(): num for key, num in dict_use.items()}
    if isinstance(jstr, str):
        return jdict[jstr.lower()]
    if isinstance(jstr, Iterable):
        return [jdict[key.lower()] for key in jstr]
    raise ValueError(
        "Jones polarization {j} cannot be converted to index.".format(j=jstr)
    )
def jnum2str(jnum, x_orientation=None):
    """
    Convert jones polarization number to str according to calfits memo.

    Parameters
    ----------
    jnum : int
        antenna (jones) polarization number
    x_orientation : str, optional
        Orientation of the physical dipole corresponding to what is
        labelled as the x polarization ("east" or "north") to convert to
        E/N strings.

    Returns
    -------
    str
        antenna (jones) polarization string corresponding to number

    Raises
    ------
    ValueError
        If the jones polarization number cannot be converted to a jones
        polarization string.

    Warns
    -----
    UserWarning
        If the x_orientation not recognized.
    """
    dict_use = copy.deepcopy(JONES_NUM2STR_DICT)
    if x_orientation is not None:
        try:
            rep_dict = _x_orientation_rep_dict(x_orientation)
        except ValueError:
            warnings.warn("x_orientation not recognized.")
        else:
            # Rewrite x/y in the output strings as e/n equivalents.
            for key, value in JONES_NUM2STR_DICT.items():
                dict_use[key] = value.replace("x", rep_dict["x"]).replace(
                    "y", rep_dict["y"]
                )

    if isinstance(jnum, (int, np.int32, np.int64)):
        return dict_use[jnum]
    if isinstance(jnum, Iterable):
        return [dict_use[i] for i in jnum]
    raise ValueError(
        "Jones polarization {j} cannot be converted to string.".format(j=jnum)
    )
def parse_polstr(polstr, x_orientation=None):
    """
    Parse a polarization string and return pyuvdata standard polarization string.

    See utils.POL_STR2NUM_DICT for options.

    Parameters
    ----------
    polstr : str
        polarization string
    x_orientation : str, optional
        Orientation of the physical dipole corresponding to what is
        labelled as the x polarization ("east" or "north") to allow for
        converting from E/N strings.

    Returns
    -------
    str
        AIPS Memo 117 standard string

    Raises
    ------
    ValueError
        If the pol string cannot be converted to a polarization number.

    Warns
    -----
    UserWarning
        If the x_orientation not recognized.
    """
    # Round-trip through the numeric representation to normalize the string.
    pol_num = polstr2num(polstr, x_orientation=x_orientation)
    return polnum2str(pol_num, x_orientation=x_orientation)
def parse_jpolstr(jpolstr, x_orientation=None):
    """
    Parse a Jones polarization string and return pyuvdata standard jones string.

    See utils.JONES_STR2NUM_DICT for options.

    Parameters
    ----------
    jpolstr : str
        Jones polarization string

    Returns
    -------
    str
        calfits memo standard string

    Raises
    ------
    ValueError
        If the jones string cannot be converted to a polarization number.

    Warns
    -----
    UserWarning
        If the x_orientation not recognized.
    """
    # Round-trip through the numeric representation to normalize the string.
    jpol_num = jstr2num(jpolstr, x_orientation=x_orientation)
    return jnum2str(jpol_num, x_orientation=x_orientation)
def conj_pol(pol):
    """
    Return the polarization for the conjugate baseline.

    For example, (1, 2, 'xy') = conj(2, 1, 'yx').
    The returned polarization is determined by assuming the antenna pair is
    reversed in the data, and finding the correct polarization correlation
    which will yield the requested baseline when conjugated. Note this means
    changing the polarization for linear cross-pols, but keeping auto-pol
    (e.g. xx) and Stokes the same.

    Parameters
    ----------
    pol : str or int
        Polarization string or integer.

    Returns
    -------
    cpol : str or int
        Polarization as if antennas are swapped (type matches input)
    """
    lower_conj = {key.lower(): val for key, val in CONJ_POL_DICT.items()}
    # Check str before Iterable: a str is itself iterable.
    if isinstance(pol, str):
        return lower_conj[pol.lower()]
    if isinstance(pol, Iterable):
        return [conj_pol(p) for p in pol]
    if isinstance(pol, (int, np.int32, np.int64)):
        # Convert to string, conjugate, convert back.
        return polstr2num(lower_conj[polnum2str(pol).lower()])
    raise ValueError("Polarization not recognized, cannot be conjugated.")
def reorder_conj_pols(pols):
    """
    Reorder multiple pols, swapping pols that are conjugates of one another.

    For example ('xx', 'xy', 'yx', 'yy') -> ('xx', 'yx', 'xy', 'yy')
    This is useful for the _key2inds function in the case where an antenna
    pair is specified but the conjugate pair exists in the data. The conjugated
    data should be returned in the order of the polarization axis, so after
    conjugating the data, the pols need to be reordered.

    Parameters
    ----------
    pols : array_like of str or int
        Polarization array (strings or ints).

    Returns
    -------
    conj_order : list of int
        Indices to reorder polarization array.
    """
    if not isinstance(pols, Iterable):
        raise ValueError("reorder_conj_pols must be given an array of polarizations.")

    # Array needed for np.where
    cpols = np.array([conj_pol(p) for p in pols])
    conj_order = []
    for p in pols:
        if p in cpols:
            conj_order.append(np.where(cpols == p)[0][0])
        else:
            conj_order.append(-1)  # marker: conjugate missing
    if -1 in conj_order:
        raise ValueError(
            "Not all conjugate pols exist in the polarization array provided."
        )
    return conj_order
def LatLonAlt_from_XYZ(xyz, check_acceptability=True):
    """
    Calculate lat/lon/alt from ECEF x,y,z.

    Parameters
    ----------
    xyz : ndarray of float
        numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
    check_acceptability : bool
        Flag to check XYZ coordinates are reasonable.

    Returns
    -------
    latitude : ndarray or float
        latitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
    longitude : ndarray or float
        longitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
    altitude : ndarray or float
        altitude, numpy array (if Npts > 1) or value (if Npts = 1) in meters
    """
    xyz = np.array(xyz)
    if xyz.ndim > 1 and xyz.shape[1] != 3:
        raise ValueError("The expected shape of ECEF xyz array is (Npts, 3).")

    # Promote a single point to a (1, 3) array for the C routine.
    xyz_use = xyz[np.newaxis, :] if xyz.ndim == 1 else xyz

    if check_acceptability:
        # Magnitudes should be close to the Earth radius (meters).
        norms = np.linalg.norm(xyz_use, axis=1)
        if np.any(norms < 6.35e6) or np.any(norms > 6.39e6):
            raise ValueError("xyz values should be ECEF x, y, z coordinates in meters")

    latitude, longitude, altitude = _utils._latlonalt_from_xyz(
        np.ascontiguousarray(xyz_use, dtype=np.float64)
    )
    if xyz.ndim == 1:
        # single point in, scalars out
        return latitude[0], longitude[0], altitude[0]
    return latitude, longitude, altitude
def XYZ_from_LatLonAlt(latitude, longitude, altitude):
    """
    Calculate ECEF x,y,z from lat/lon/alt values.

    Parameters
    ----------
    latitude : ndarray or float
        latitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
    longitude : ndarray or float
        longitude, numpy array (if Npts > 1) or value (if Npts = 1) in radians
    altitude : ndarray or float
        altitude, numpy array (if Npts > 1) or value (if Npts = 1) in meters

    Returns
    -------
    xyz : ndarray of float
        numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
    """
    latitude = np.ascontiguousarray(latitude, dtype=np.float64)
    longitude = np.ascontiguousarray(longitude, dtype=np.float64)
    altitude = np.ascontiguousarray(altitude, dtype=np.float64)
    # All three inputs must describe the same number of points.
    if longitude.size != latitude.size or altitude.size != latitude.size:
        raise ValueError(
            "latitude, longitude and altitude must all have the same length"
        )
    return _utils._xyz_from_latlonalt(latitude, longitude, altitude)
def rotECEF_from_ECEF(xyz, longitude):
    """
    Get rotated ECEF positions such that the x-axis goes through the longitude.

    Miriad and uvfits expect antenna positions in this frame
    (with longitude of the array center/telescope location)

    Parameters
    ----------
    xyz : ndarray of float
        numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
    longitude : float
        longitude in radians to rotate coordinates to
        (usually the array center/telescope location).

    Returns
    -------
    ndarray of float
        Rotated ECEF coordinates, shape (Npts, 3).
    """
    # Rotate about the z-axis by -longitude.
    angle = -1 * longitude
    cos_a = np.cos(angle)
    sin_a = np.sin(angle)
    rot_matrix = np.array(
        [
            [cos_a, -1 * sin_a, 0],
            [sin_a, cos_a, 0],
            [0, 0, 1],
        ]
    )
    return rot_matrix.dot(xyz.T).T
def ECEF_from_rotECEF(xyz, longitude):
    """
    Calculate ECEF from a rotated ECEF (Inverse of rotECEF_from_ECEF).

    Parameters
    ----------
    xyz : ndarray of float
        numpy array, shape (Npts, 3), with rotated ECEF x,y,z coordinates.
    longitude : float
        longitude in radians giving the x direction of the rotated coordinates
        (usually the array center/telescope location).

    Returns
    -------
    ndarray of float
        ECEF coordinates, shape (Npts, 3).
    """
    # Rotate about the z-axis by +longitude (inverse of rotECEF_from_ECEF).
    angle = longitude
    cos_a = np.cos(angle)
    sin_a = np.sin(angle)
    rot_matrix = np.array(
        [
            [cos_a, -1 * sin_a, 0],
            [sin_a, cos_a, 0],
            [0, 0, 1],
        ]
    )
    return rot_matrix.dot(xyz.T).T
def ENU_from_ECEF(xyz, latitude, longitude, altitude):
    """
    Calculate local ENU (east, north, up) coordinates from ECEF coordinates.

    Parameters
    ----------
    xyz : ndarray of float
        numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
    latitude : float
        Latitude of center of ENU coordinates in radians.
    longitude : float
        Longitude of center of ENU coordinates in radians.
    altitude : float
        Altitude of center of ENU coordinates in radians.

    Returns
    -------
    ndarray of float
        numpy array, shape (Npts, 3), with local ENU coordinates
    """
    xyz = np.array(xyz)
    if xyz.ndim > 1 and xyz.shape[1] != 3:
        raise ValueError("The expected shape of ECEF xyz array is (Npts, 3).")

    # Promote a single point to a (1, 3) array for the C routine.
    xyz_in = xyz[np.newaxis, :] if xyz.ndim == 1 else xyz

    # check that these are sensible ECEF values -- their magnitudes need to be
    # on the order of Earth's radius
    magnitudes = np.linalg.norm(xyz_in, axis=1)
    lower_bound, upper_bound = 6.35e6, 6.39e6
    if np.any(magnitudes <= lower_bound) or np.any(magnitudes >= upper_bound):
        raise ValueError(
            "ECEF vector magnitudes must be on the order of the radius of the earth"
        )

    enu = _utils._ENU_from_ECEF(
        np.ascontiguousarray(xyz_in, dtype=np.float64),
        np.ascontiguousarray(latitude, dtype=np.float64),
        np.ascontiguousarray(longitude, dtype=np.float64),
        np.ascontiguousarray(altitude, dtype=np.float64),
    )
    # single point in, 1-D result out
    return np.squeeze(enu) if xyz.ndim == 1 else enu
def ECEF_from_ENU(enu, latitude, longitude, altitude):
    """
    Calculate ECEF coordinates from local ENU (east, north, up) coordinates.

    Parameters
    ----------
    enu : ndarray of float
        numpy array, shape (Npts, 3), with local ENU coordinates.
    latitude : float
        Latitude of center of ENU coordinates in radians.
    longitude : float
        Longitude of center of ENU coordinates in radians.
    altitude : float
        Altitude of center of ENU coordinates (docstring historically said
        radians; presumably meters — TODO confirm).

    Returns
    -------
    xyz : ndarray of float
        numpy array, shape (Npts, 3), with ECEF x,y,z coordinates.
    """
    enu = np.array(enu)
    if enu.ndim > 1 and enu.shape[1] != 3:
        raise ValueError("The expected shape of the ENU array is (Npts, 3).")
    squeeze_output = enu.ndim == 1
    enu_use = enu[np.newaxis, :] if squeeze_output else enu
    xyz = _utils._ECEF_FROM_ENU(
        np.ascontiguousarray(enu_use, dtype=np.float64),
        np.ascontiguousarray(latitude, dtype=np.float64),
        np.ascontiguousarray(longitude, dtype=np.float64),
        np.ascontiguousarray(altitude, dtype=np.float64),
    )
    # A 1-D input yields a 1-D output.
    return np.squeeze(xyz) if squeeze_output else xyz
def phase_uvw(ra, dec, initial_uvw):
    """
    Calculate phased uvws/positions from unphased ones in an icrs or gcrs frame.

    This code expects input uvws or positions relative to the telescope
    location in the same frame that ra/dec are in (e.g. icrs or gcrs) and
    returns phased ones in the same frame.

    Note that this code is nearly identical to ENU_from_ECEF, except that it
    uses an arbitrary phasing center rather than a coordinate center.

    Parameters
    ----------
    ra : float
        Right ascension of phase center.
    dec : float
        Declination of phase center.
    initial_uvw : ndarray of float
        Unphased uvws or positions relative to the array center,
        shape (Nlocs, 3).

    Returns
    -------
    uvw : ndarray of float
        uvw array in the same frame as initial_uvws, ra and dec.
    """
    # Promote a single location to a (1, 3) array.
    uvw_use = initial_uvw[np.newaxis, :] if initial_uvw.ndim == 1 else initial_uvw
    return _utils._phase_uvw(
        np.float64(ra),
        np.float64(dec),
        np.ascontiguousarray(uvw_use, dtype=np.float64),
    )
def unphase_uvw(ra, dec, uvw):
    """
    Calculate unphased uvws/positions from phased ones in an icrs or gcrs frame.

    This code expects phased uvws or positions in the same frame that ra/dec
    are in (e.g. icrs or gcrs) and returns unphased ones in the same frame.

    Parameters
    ----------
    ra : float
        Right ascension of phase center.
    dec : float
        Declination of phase center.
    uvw : ndarray of float
        Phased uvws or positions relative to the array center,
        shape (Nlocs, 3).

    Returns
    -------
    unphased_uvws : ndarray of float
        Unphased uvws or positions relative to the array center,
        shape (Nlocs, 3).
    """
    # Promote a single location to a (1, 3) array.
    uvw_use = uvw[np.newaxis, :] if uvw.ndim == 1 else uvw
    return _utils._unphase_uvw(
        np.float64(ra), np.float64(dec), np.ascontiguousarray(uvw_use, dtype=np.float64),
    )
def get_lst_for_time(jd_array, latitude, longitude, altitude):
    """
    Get the lsts for a set of jd times at an earth location.

    Parameters
    ----------
    jd_array : ndarray of float
        JD times to get lsts for.
    latitude : float
        Latitude of location to get lst for in degrees.
    longitude : float
        Longitude of location to get lst for in degrees.
    altitude : float
        Altitude of location to get lst for in meters.
        NOTE: accepted for API symmetry but not used in the calculation.

    Returns
    -------
    ndarray of float
        LSTs in radians corresponding to the jd_array.
    """
    # Fix: removed a dead `lst_array = np.zeros_like(jd_array)` store that was
    # unconditionally overwritten below.
    # Compute on unique times only and expand back via the inverse index.
    jd, reverse_inds = np.unique(jd_array, return_inverse=True)
    times = Time(
        jd,
        format="jd",
        location=(Angle(longitude, unit="deg"), Angle(latitude, unit="deg")),
    )
    if iers.conf.auto_max_age is None:  # pragma: no cover
        # IERS auto-download disabled: fall back to extrapolated UT1-UTC for
        # times outside the IERS table instead of erroring.
        delta, status = times.get_delta_ut1_utc(return_status=True)
        if np.any(
            np.isin(status, (iers.TIME_BEFORE_IERS_RANGE, iers.TIME_BEYOND_IERS_RANGE))
        ):
            warnings.warn(
                "time is out of IERS range, setting delta ut1 utc to "
                "extrapolated value"
            )
            times.delta_ut1_utc = delta
    return times.sidereal_time("apparent").radian[reverse_inds]
def find_clusters(location_ids, location_vectors, tol):
    """
    Find clusters of vectors (e.g. redundand baselines, times).

    Parameters
    ----------
    location_ids : array_like of int
        ID labels for locations.
    location_vectors : array_like of float
        location vectors, can be multidimensional
    tol : float
        tolerance for clusters

    Returns
    -------
    list of list of location_ids
    """
    vectors = np.asarray(location_vectors)
    ids = np.asarray(location_ids)
    if vectors.ndim == 1:
        vectors = vectors[:, np.newaxis]
    # Adjacency: for each location, every location within tol (incl. itself).
    within_tol = squareform(pdist(vectors) < tol)
    adjacency = {}
    for idx, row in enumerate(within_tol):
        row[idx] = True
        adjacency[ids[idx]] = ids[row]
    # The adjacency map defines graph edges. A neighbor `candidate` of `key`
    # is in the same redundant group iff adjacency[key] is a subset of
    # adjacency[candidate].
    groups = []
    for key, neighbors in adjacency.items():
        members = [key]
        for candidate in neighbors:
            if set(neighbors).issubset(adjacency[candidate]) and candidate not in members:
                members.append(candidate)
        members.sort()
        groups.append(members)
    # Pad groups to equal length so np.unique can deduplicate row-wise, since
    # each group appears once per member above.
    width = len(max(groups, key=len))
    padded = np.array([grp + [-1] * (width - len(grp)) for grp in groups])
    unique_rows = np.unique(padded, axis=0).tolist()
    # Strip the -1 padding entries before returning.
    return [[loc for loc in row if loc != -1] for row in unique_rows]
def get_baseline_redundancies(baselines, baseline_vecs, tol=1.0, with_conjugates=False):
    """
    Find redundant baseline groups.

    Parameters
    ----------
    baselines : array_like of int
        Baseline numbers, shape (Nbls,)
    baseline_vecs : array_like of float
        Baseline vectors in meters, shape (Nbls, 3)
    tol : float
        Absolute tolerance of redundancy, in meters.
    with_conjugates : bool
        Option to include baselines that are redundant when flipped.

    Returns
    -------
    baseline_groups : list of lists of int
        list of lists of redundant baseline numbers
    vec_bin_centers : list of array_like of float
        List of vectors describing redundant group centers
    lengths : list of float
        List of redundant group baseline lengths in meters
    baseline_ind_conj : list of int
        List of baselines that are redundant when reversed. Only returned if
        with_conjugates is True
    """
    n_bls = baselines.shape[0]
    if baseline_vecs.shape != (n_bls, 3):
        raise ValueError("Baseline vectors must be shape (Nbls, 3)")
    baseline_vecs = copy.copy(baseline_vecs)  # protect the caller's array
    if with_conjugates:
        # Flip baselines pointing into the u<0 half-space (with tol-width
        # sign ambiguity handled component by component), then redo the
        # search without conjugation.
        flip = []
        for u, v, w in baseline_vecs:
            u_neg = u < -tol
            u_zero = np.isclose(u, 0.0, atol=tol)
            v_neg = v < -tol
            v_zero = np.isclose(v, 0.0, atol=tol)
            w_neg = w < -tol
            flip.append(u_neg or (u_zero and v_neg) or (u_zero and v_zero and w_neg))
        flip = np.array(flip, dtype=bool)
        baseline_vecs[flip] *= -1
        groups, centers, lengths = get_baseline_redundancies(
            baselines, baseline_vecs, tol=tol, with_conjugates=False
        )
        return groups, centers, lengths, baselines[flip]
    groups = find_clusters(baselines, baseline_vecs, tol)
    centers = np.zeros((len(groups), 3))
    for g_ind, members in enumerate(groups):
        member_inds = [np.where(bl == baselines)[0] for bl in members]
        centers[g_ind] = np.mean(baseline_vecs[member_inds, :], axis=0)
    lengths = np.sqrt(np.sum(centers ** 2, axis=1))
    # Each baseline must land in exactly one group; more memberships than
    # baselines means the tolerance merged overlapping groups ambiguously.
    if np.sum([len(grp) for grp in groups]) > n_bls:
        raise ValueError(
            "Some baselines are falling into multiple"
            " redundant groups. Lower the tolerance to resolve ambiguity."
        )
    return groups, centers, lengths
def get_antenna_redundancies(
    antenna_numbers, antenna_positions, tol=1.0, include_autos=False
):
    """
    Find redundant baseline groups based on antenna positions.

    Parameters
    ----------
    antenna_numbers : array_like of int
        Antenna numbers, shape (Nants,).
    antenna_positions : array_like of float
        Antenna position vectors in the ENU (topocentric) frame in meters,
        shape (Nants, 3).
    tol : float
        Redundancy tolerance in meters.
    include_autos : bool
        Option to include autocorrelations.

    Returns
    -------
    baseline_groups : list of lists of int
        list of lists of redundant baseline numbers
    vec_bin_centers : list of array_like of float
        List of vectors describing redundant group centers
    lengths : list of float
        List of redundant group baseline lengths in meters

    Notes
    -----
    The baseline numbers refer to antenna pairs (a1, a2) such that
    the baseline vector formed from ENU antenna positions,
    blvec = enu[a1] - enu[a2]
    is close to the other baselines in the group.
    This is achieved by putting baselines in a form of the u>0
    convention, but with a tolerance in defining the signs of
    vector components.
    To guarantee that the same baseline numbers are present in a UVData
    object, ``UVData.conjugate_bls('u>0', uvw_tol=tol)``, where `tol` is
    the tolerance used here.
    """
    n_ants = antenna_numbers.size
    bl_nums = []
    bl_vectors = []
    # Build every (aj, ai) pair with ai > aj (or ai >= aj with autos).
    for j_ind in range(n_ants):
        start = j_ind if include_autos else j_ind + 1
        for i_ind in range(start, n_ants):
            ant_i, ant_j = antenna_numbers[i_ind], antenna_numbers[j_ind]
            bl_nums.append(antnums_to_baseline(ant_j, ant_i, n_ants))
            bl_vectors.append(antenna_positions[i_ind] - antenna_positions[j_ind])
    bl_nums = np.array(bl_nums)
    bl_vectors = np.array(bl_vectors)
    gps, vecs, lens, conjs = get_baseline_redundancies(
        bl_nums, bl_vectors, tol=tol, with_conjugates=True
    )
    # The redundancy search flipped the conjugated baseline vectors; flip the
    # corresponding baseline numbers to match.
    for g_ind, group in enumerate(gps):
        gps[g_ind] = [
            baseline_index_flip(bl, n_ants) if bl in conjs else bl for bl in group
        ]
    return gps, vecs, lens
def mean_collapse(
    arr, weights=None, axis=None, return_weights=False, return_weights_square=False
):
    """
    Collapse by averaging data.

    This is similar to np.average, except it handles infs (by giving them
    zero weight) and zero weight axes (by forcing result to be inf with zero
    output weight).

    Parameters
    ----------
    arr : array
        Input array to process.
    weights: ndarray, optional
        weights for average. If none, will default to equal weight for all
        non-infinite data.
    axis : int or tuple, optional
        Axis or axes to collapse (passed to np.sum). Default is all.
    return_weights : bool
        Whether to return sum of weights.
    return_weights_square: bool
        Whether to return the sum of the square of the weights. Default is False.
    """
    arr = copy.deepcopy(arr)  # work on a copy; inf entries are zeroed below
    if weights is None:
        weights = np.ones_like(arr)
    else:
        weights = copy.deepcopy(weights)
    # Infinite data gets zero weight, and is zeroed so it cannot poison sums.
    weights = weights * np.logical_not(np.isinf(arr))
    arr[np.isinf(arr)] = 0
    weight_sum = np.sum(weights, axis=axis)
    if return_weights_square:
        weight_sq_sum = np.sum(weights ** 2, axis=axis)
    numerator = np.sum(weights * arr, axis=axis)
    # Where the collapsed weight is (numerically) zero, force the result to inf.
    nonzero = weight_sum > 1e-10
    result = np.true_divide(numerator, weight_sum, where=nonzero)
    result = np.where(nonzero, result, np.inf)
    if return_weights and return_weights_square:
        return result, weight_sum, weight_sq_sum
    if return_weights:
        return result, weight_sum
    if return_weights_square:
        return result, weight_sq_sum
    return result
def absmean_collapse(
    arr, weights=None, axis=None, return_weights=False, return_weights_square=False
):
    """
    Collapse by averaging absolute value of data.

    Parameters
    ----------
    arr : array
        Input array to process.
    weights: ndarray, optional
        weights for average. If none, will default to equal weight for all
        non-infinite data.
    axis : int or tuple, optional
        Axis or axes to collapse (passed to np.sum). Default is all.
    return_weights : bool
        Whether to return sum of weights.
    return_weights_square: bool
        whether to return the sum of the squares of the weights. Default is False.
    """
    # Delegate to mean_collapse on the magnitudes.
    magnitudes = np.abs(arr)
    return mean_collapse(
        magnitudes,
        weights=weights,
        axis=axis,
        return_weights=return_weights,
        return_weights_square=return_weights_square,
    )
def quadmean_collapse(
    arr, weights=None, axis=None, return_weights=False, return_weights_square=False
):
    """
    Collapse by averaging in quadrature.

    Parameters
    ----------
    arr : array
        Input array to process.
    weights: ndarray, optional
        weights for average. If none, will default to equal weight for all
        non-infinite data.
    axis : int or tuple, optional
        Axis or axes to collapse (passed to np.sum). Default is all.
    return_weights : bool
        Whether to return sum of weights.
    return_weights_square: bool
        whether to return the sum of the squares of the weights. Default is False.
    """
    # Mean of |arr|^2, then square-root the collapsed values (weights pass
    # through unchanged).
    collapsed = mean_collapse(
        np.abs(arr) ** 2,
        weights=weights,
        axis=axis,
        return_weights=return_weights,
        return_weights_square=return_weights_square,
    )
    if return_weights and return_weights_square:
        return np.sqrt(collapsed[0]), collapsed[1], collapsed[2]
    if return_weights or return_weights_square:
        return np.sqrt(collapsed[0]), collapsed[1]
    return np.sqrt(collapsed)
def or_collapse(
    arr, weights=None, axis=None, return_weights=False, return_weights_square=False
):
    """
    Collapse using OR operation.

    Parameters
    ----------
    arr : array
        Input array to process. Must be a boolean array.
    weights: ndarray, optional
        NOT USED, but kept for symmetry with other collapsing functions.
        A warning is emitted if non-uniform weights are passed.
    axis : int or tuple, optional
        Axis or axes to collapse (take OR over). Default is all.
    return_weights : bool
        Whether to return dummy weights array.
        NOTE: the dummy weights will simply be an array of ones
    return_weights_square: bool
        NOT USED, but kept for symmetry with other collapsing functions.

    Raises
    ------
    ValueError
        If arr is not a boolean array.
    """
    # Fix: np.bool and np.float were removed in numpy 1.24 (deprecated in
    # 1.20); use np.bool_ / np.float64 instead.
    if arr.dtype != np.bool_:
        raise ValueError("Input to or_collapse function must be boolean array")
    out = np.any(arr, axis=axis)
    if (weights is not None) and not np.all(weights == weights.reshape(-1)[0]):
        warnings.warn("Currently weights are not handled when OR-ing boolean arrays.")
    if return_weights:
        return out, np.ones_like(out, dtype=np.float64)
    else:
        return out
def and_collapse(
    arr, weights=None, axis=None, return_weights=False, return_weights_square=False
):
    """
    Collapse using AND operation.

    Parameters
    ----------
    arr : array
        Input array to process. Must be a boolean array.
    weights: ndarray, optional
        NOT USED, but kept for symmetry with other collapsing functions.
        A warning is emitted if non-uniform weights are passed.
    axis : int or tuple, optional
        Axis or axes to collapse (take AND over). Default is all.
    return_weights : bool
        Whether to return dummy weights array.
        NOTE: the dummy weights will simply be an array of ones
    return_weights_square: bool
        NOT USED, but kept for symmetry with other collapsing functions.

    Raises
    ------
    ValueError
        If arr is not a boolean array.
    """
    # Fix: np.bool and np.float were removed in numpy 1.24 (deprecated in
    # 1.20); use np.bool_ / np.float64 instead.
    if arr.dtype != np.bool_:
        raise ValueError("Input to and_collapse function must be boolean array")
    out = np.all(arr, axis=axis)
    if (weights is not None) and not np.all(weights == weights.reshape(-1)[0]):
        warnings.warn("Currently weights are not handled when AND-ing boolean arrays.")
    if return_weights:
        return out, np.ones_like(out, dtype=np.float64)
    else:
        return out
def collapse(
    arr, alg, weights=None, axis=None, return_weights=False, return_weights_square=False
):
    """
    Parent function to collapse an array with a given algorithm.

    Parameters
    ----------
    arr : array
        Input array to process.
    alg : str
        Algorithm to use. Must be defined in this function with
        corresponding subfunction above.
    weights: ndarray, optional
        weights for collapse operation (e.g. weighted mean).
        NOTE: Some subfunctions do not use the weights. See corresponding
        doc strings.
    axis : int or tuple, optional
        Axis or axes to collapse. Default is all.
    return_weights : bool
        Whether to return sum of weights.
    return_weights_square: bool
        Whether to return the sum of the squares of the weights. Default is False.

    Raises
    ------
    ValueError
        If alg is not a recognized algorithm name.
    """
    collapse_dict = {
        "mean": mean_collapse,
        "absmean": absmean_collapse,
        "quadmean": quadmean_collapse,
        "or": or_collapse,
        "and": and_collapse,
    }
    # Fix: keep the try body to the dict lookup only. Previously the collapse
    # call itself was inside the try, so a KeyError raised *inside* a
    # subfunction would be misreported as an unknown algorithm name.
    try:
        func = collapse_dict[alg]
    except KeyError:
        raise ValueError(
            "Collapse algorithm must be one of: "
            + ", ".join(collapse_dict.keys())
            + "."
        )
    return func(
        arr,
        weights=weights,
        axis=axis,
        return_weights=return_weights,
        return_weights_square=return_weights_square,
    )
def uvcalibrate(
    uvdata,
    uvcal,
    inplace=True,
    prop_flags=True,
    flag_missing=True,
    Dterm_cal=False,
    delay_convention="minus",
    undo=False,
    time_check=True,
    ant_check=True,
):
    """
    Calibrate a UVData object with a UVCal object.
    Parameters
    ----------
    uvdata : UVData object
        UVData object to calibrate.
    uvcal : UVCal object
        UVCal object containing the calibration.
    inplace : bool, optional
        if True edit uvdata in place, else return a calibrated copy
    prop_flags : bool, optional
        if True, propagate calibration flags to data flags
        and doesn't use flagged gains. Otherwise, uses flagged gains and
        does not propagate calibration flags to data flags.
    flag_missing : bool, optional
        Deprecated in favor of ant_check.
        If True, flag baselines in uvdata otherwise don't flag and
        don't calibrate the baseline if a participating antenna or polarization
        is missing in uvcal.
    Dterm_cal : bool, optional
        Calibrate the off-diagonal terms in the Jones matrix if present
        in uvcal. Default is False. Currently not implemented.
    delay_convention : str, optional
        Exponent sign to use in conversion of 'delay' to 'gain' cal_type
        if the input uvcal is not inherently 'gain' cal_type. Default to 'minus'.
    undo : bool, optional
        If True, undo the provided calibration. i.e. apply the calibration with
        flipped gain_convention. Flag propagation rules apply the same.
    time_check : bool
        Option to check that times match between the UVCal and UVData
        objects if UVCal has a single time or time range. Times are always
        checked if UVCal has multiple times.
    ant_check : bool
        Option to check that all antennas with data on the UVData
        object have calibration solutions in the UVCal object. If this option is
        set to False, uvcalibrate will proceed without erroring and data for
        antennas without calibrations will be flagged.
    Returns
    -------
    UVData, optional
        Returns if not inplace
    """
    if not inplace:
        uvdata = uvdata.copy()
    # Check whether the UVData antennas *that have data associated with them*
    # have associated data in the UVCal object
    uvdata_unique_nums = np.unique(np.append(uvdata.ant_1_array, uvdata.ant_2_array))
    uvdata.antenna_names = np.asarray(uvdata.antenna_names)
    # Antenna names for the UVData antennas that actually appear in the data.
    uvdata_used_antnames = np.array(
        [
            uvdata.antenna_names[np.where(uvdata.antenna_numbers == antnum)][0]
            for antnum in uvdata_unique_nums
        ]
    )
    uvcal_unique_nums = np.unique(uvcal.ant_array)
    uvcal.antenna_names = np.asarray(uvcal.antenna_names)
    # Antenna names for the UVCal antennas that have solutions.
    uvcal_used_antnames = np.array(
        [
            uvcal.antenna_names[np.where(uvcal.antenna_numbers == antnum)][0]
            for antnum in uvcal_unique_nums
        ]
    )
    ant_arr_match = uvcal_used_antnames.tolist() == uvdata_used_antnames.tolist()
    if not ant_arr_match:
        # check more carefully
        name_missing = []
        for this_ant_name in uvdata_used_antnames:
            wh_ant_match = np.nonzero(uvcal_used_antnames == this_ant_name)
            if wh_ant_match[0].size == 0:
                name_missing.append(this_ant_name)
        # use_ant_nums controls the deprecated number-based fallback in the
        # per-baseline loop below.
        use_ant_nums = False
        if len(name_missing) > 0:
            if len(name_missing) == uvdata_used_antnames.size:
                # all antenna_names with data on UVData are missing on UVCal.
                if not ant_check:
                    warnings.warn(
                        "All antenna names with data on UVData are missing "
                        "on UVCal. Since ant_check is False, calibration will "
                        "proceed but all data will be flagged."
                    )
                else:
                    # this entire clause will be replaced with just raising a
                    # ValueError in version 2.2
                    # old behavior only required that antenna numbers were present,
                    # not names. Check numbers
                    number_missing = []
                    for this_ant_name in uvdata_used_antnames:
                        uvdata_ant_num = uvdata.antenna_numbers[
                            np.where(uvdata.antenna_names == this_ant_name)[0][0]
                        ]
                        if uvdata_ant_num not in uvcal_unique_nums:
                            number_missing.append(this_ant_name)
                    if len(number_missing) == 0:
                        # all have matching numbers on UVCal
                        use_ant_nums = True
                        warnings.warn(
                            "All antenna names with data on UVData are missing "
                            "on UVCal. They do all have matching antenna numbers on "
                            "UVCal. Currently the data will be calibrated using the "
                            "matching antenna number, but that will be deprecated in "
                            "version 2.2 and this will become an error.",
                            DeprecationWarning,
                        )
                    elif len(number_missing) < len(name_missing):
                        # Some have matching numbers on UVCal
                        use_ant_nums = True
                        both_missing = sorted(set(number_missing) & set(name_missing))
                        only_name_missing = sorted(
                            set(name_missing) - set(number_missing)
                        )
                        warnings.warn(
                            f"Antennas {only_name_missing} have data on UVData but "
                            "are missing on UVCal. They do have matching antenna "
                            "numbers on UVCal. Currently the data for these antennas "
                            "will be calibrated using the matching antenna number, "
                            "but that will be deprecated in "
                            "version 2.2 and this will become an error.",
                            DeprecationWarning,
                        )
                        if flag_missing is True:
                            warnings.warn(
                                f"Antennas {both_missing} have data on UVData but "
                                "are missing on UVCal. Currently calibration will "
                                "proceed and since flag_missing is True, the data "
                                "for these antennas will be flagged. This will "
                                "become an error in version 2.2, to continue "
                                "calibration and flag missing antennas in the "
                                "future, set ant_check=False.",
                                DeprecationWarning,
                            )
                        else:
                            warnings.warn(
                                f"Antennas {both_missing} have data on UVData but "
                                "are missing on UVCal. Currently calibration will "
                                "proceed and since flag_missing is False, the data "
                                "for these antennas will not be calibrated or "
                                "flagged. This will become an error in version 2.2, "
                                "to continue calibration and flag missing "
                                "antennas in the future, set ant_check=False.",
                                DeprecationWarning,
                            )
            else:
                # Only some antenna_names with data on UVData are missing on UVCal
                if not ant_check:
                    warnings.warn(
                        f"Antennas {name_missing} have data on UVData but are missing "
                        "on UVCal. Since ant_check is False, calibration will "
                        "proceed and the data for these antennas will be flagged."
                    )
                else:
                    # this entire clause will be replaced with just raising a
                    # ValueError in version 2.2
                    if flag_missing is True:
                        warnings.warn(
                            f"Antennas {name_missing} have data on UVData but "
                            "are missing on UVCal. Currently calibration will "
                            "proceed and since flag_missing is True, the data "
                            "for these antennas will be flagged. This will "
                            "become an error in version 2.2, to continue "
                            "calibration and flag missing antennas in the "
                            "future, set ant_check=False.",
                            DeprecationWarning,
                        )
                    else:
                        warnings.warn(
                            f"Antennas {name_missing} have data on UVData but "
                            "are missing on UVCal. Currently calibration will "
                            "proceed and since flag_missing is False, the data "
                            "for these antennas will not be calibrated or "
                            "flagged. This will become an error in version 2.2, "
                            "to continue calibration and flag missing "
                            "antennas in the future, set ant_check=False.",
                            DeprecationWarning,
                        )
    # Check that the UVCal times match (or bracket) the UVData times.
    uvdata_times = np.unique(uvdata.time_array)
    downselect_cal_times = False
    if uvcal.Ntimes > 1:
        if uvcal.Ntimes < uvdata.Ntimes:
            raise ValueError(
                "The uvcal object has more than one time but fewer than the "
                "number of unique times on the uvdata object."
            )
        uvcal_times = np.unique(uvcal.time_array)
        try:
            time_arr_match = np.allclose(
                uvcal_times,
                uvdata_times,
                atol=uvdata._time_array.tols[1],
                rtol=uvdata._time_array.tols[0],
            )
        except ValueError:
            time_arr_match = False
        if not time_arr_match:
            # check more carefully
            uvcal_times_to_keep = []
            for this_time in uvdata_times:
                wh_time_match = np.nonzero(
                    np.isclose(
                        uvcal.time_array - this_time,
                        0,
                        atol=uvdata._time_array.tols[1],
                        rtol=uvdata._time_array.tols[0],
                    )
                )
                if wh_time_match[0].size > 0:
                    uvcal_times_to_keep.append(uvcal.time_array[wh_time_match][0])
                else:
                    warnings.warn(
                        f"Time {this_time} exists on UVData but not on UVCal. "
                        "This will become an error in version 2.2",
                        DeprecationWarning,
                    )
            if len(uvcal_times_to_keep) < uvcal.Ntimes:
                downselect_cal_times = True
    elif uvcal.time_range is None:
        # only one UVCal time, no time_range.
        # This cannot match if UVData.Ntimes > 1.
        # If they are both NTimes = 1, then check if they're close.
        if uvdata.Ntimes > 1 or not np.isclose(
            uvdata_times,
            uvcal.time_array,
            atol=uvdata._time_array.tols[1],
            rtol=uvdata._time_array.tols[0],
        ):
            if not time_check:
                warnings.warn(
                    "Times do not match between UVData and UVCal "
                    "but time_check is False, so calibration "
                    "will be applied anyway."
                )
            else:
                warnings.warn(
                    "Times do not match between UVData and UVCal. "
                    "Set time_check=False to apply calibration anyway. "
                    "This will become an error in version 2.2",
                    DeprecationWarning,
                )
    else:
        # time_array is length 1 and time_range exists: check uvdata_times in time_range
        if (
            np.min(uvdata_times) < uvcal.time_range[0]
            or np.max(uvdata_times) > uvcal.time_range[1]
        ):
            if not time_check:
                warnings.warn(
                    "Times do not match between UVData and UVCal "
                    "but time_check is False, so calibration "
                    "will be applied anyway."
                )
            else:
                warnings.warn(
                    "Times do not match between UVData and UVCal. "
                    "Set time_check=False to apply calibration anyway. "
                    "This will become an error in version 2.2",
                    DeprecationWarning,
                )
    # Check that the UVCal frequencies cover the UVData frequencies.
    downselect_cal_freq = False
    try:
        freq_arr_match = np.allclose(
            np.sort(uvcal.freq_array[0, :]),
            np.sort(uvdata.freq_array[0, :]),
            atol=uvdata._freq_array.tols[1],
            rtol=uvdata._freq_array.tols[0],
        )
    except ValueError:
        freq_arr_match = False
    if freq_arr_match is False:
        # check more carefully
        uvcal_freqs_to_keep = []
        for this_freq in uvdata.freq_array[0, :]:
            wh_freq_match = np.nonzero(
                np.isclose(
                    uvcal.freq_array - this_freq,
                    0,
                    atol=uvdata._freq_array.tols[1],
                    rtol=uvdata._freq_array.tols[0],
                )
            )
            if wh_freq_match[0].size > 0:
                uvcal_freqs_to_keep.append(uvcal.freq_array[wh_freq_match][0])
            else:
                warnings.warn(
                    f"Frequency {this_freq} exists on UVData but not on UVCal. "
                    "This will become an error in version 2.2",
                    DeprecationWarning,
                )
        if len(uvcal_freqs_to_keep) < uvcal.Nfreqs:
            downselect_cal_freq = True
    # Check that each feed polarization in the data has a calibration solution.
    uvdata_pol_strs = polnum2str(
        uvdata.polarization_array, x_orientation=uvdata.x_orientation
    )
    uvcal_pol_strs = jnum2str(uvcal.jones_array, x_orientation=uvcal.x_orientation)
    uvdata_feed_pols = {
        feed for pol in uvdata_pol_strs for feed in POL_TO_FEED_DICT[pol]
    }
    for feed in uvdata_feed_pols:
        # get diagonal jones str
        jones_str = parse_jpolstr(feed, x_orientation=uvcal.x_orientation)
        if jones_str not in uvcal_pol_strs:
            warnings.warn(
                f"Feed polarization {feed} exists on UVData but not on UVCal. "
                "This will become an error in version 2.2",
                DeprecationWarning,
            )
    # downselect UVCal times, frequencies
    if downselect_cal_freq or downselect_cal_times:
        if not downselect_cal_times:
            uvcal_times_to_keep = None
        elif not downselect_cal_freq:
            uvcal_freqs_to_keep = None
        # handle backwards compatibility: prevent downselecting to nothing
        # or to shapes that don't match
        if downselect_cal_times and len(uvcal_times_to_keep) < uvdata.Ntimes:
            downselect_cal_times = False
            uvcal_times_to_keep = None
        if downselect_cal_freq and len(uvcal_freqs_to_keep) < uvdata.Nfreqs:
            downselect_cal_freq = False
            uvcal_freqs_to_keep = None
    if downselect_cal_freq or downselect_cal_times:
        uvcal_use = uvcal.select(
            times=uvcal_times_to_keep, frequencies=uvcal_freqs_to_keep, inplace=False
        )
        new_uvcal = True
    else:
        uvcal_use = uvcal
        new_uvcal = False
    # input checks
    if uvcal_use.cal_type == "delay":
        if not new_uvcal:
            # make a copy to convert to gain
            uvcal_use = uvcal_use.copy()
            new_uvcal = True
        uvcal_use.convert_to_gain(delay_convention=delay_convention)
    # D-term calibration
    if Dterm_cal:
        # check for D-terms
        if -7 not in uvcal_use.jones_array and -8 not in uvcal_use.jones_array:
            raise ValueError(
                "Cannot apply D-term calibration without -7 or -8"
                "Jones polarization in uvcal object."
            )
        raise NotImplementedError("D-term calibration is not yet implemented.")
    # No D-term calibration
    else:
        # key is number, value is name
        uvdata_ant_dict = dict(zip(uvdata.antenna_numbers, uvdata.antenna_names))
        # opposite: key is name, value is number
        uvcal_ant_dict = dict(zip(uvcal.antenna_names, uvcal.antenna_numbers))
        # iterate over keys
        for key in uvdata.get_antpairpols():
            # get indices for this key
            blt_inds = uvdata.antpair2ind(key)
            pol_ind = np.argmin(
                np.abs(
                    uvdata.polarization_array - polstr2num(key[2], uvdata.x_orientation)
                )
            )
            # try to get gains for each antenna
            ant1_num = key[0]
            ant2_num = key[1]
            feed1, feed2 = POL_TO_FEED_DICT[key[2]]
            # NOTE(review): use_ant_nums is only bound when the antenna names
            # mismatch above; the KeyError branches below appear reachable only
            # in that case — confirm it cannot raise NameError otherwise.
            try:
                uvcal_ant1_num = uvcal_ant_dict[uvdata_ant_dict[ant1_num]]
            except KeyError:
                if use_ant_nums:
                    # backwards compatibility: use antenna numbers instead
                    # this will be removed in version 2.2
                    uvcal_ant1_num = ant1_num
                else:
                    uvcal_ant1_num = None
            try:
                uvcal_ant2_num = uvcal_ant_dict[uvdata_ant_dict[ant2_num]]
            except KeyError:
                if use_ant_nums:
                    # backwards compatibility: use antenna numbers instead
                    # this will be removed in version 2.2
                    uvcal_ant2_num = ant2_num
                else:
                    uvcal_ant2_num = None
            uvcal_key1 = (uvcal_ant1_num, feed1)
            uvcal_key2 = (uvcal_ant2_num, feed2)
            if uvcal_ant1_num is None or uvcal_ant2_num is None:
                # no solution for one of the antennas: flag the baseline
                uvdata.flag_array[blt_inds, 0, :, pol_ind] = True
                continue
            elif not uvcal_use._has_key(*uvcal_key1) or not uvcal_use._has_key(
                *uvcal_key2
            ):
                if flag_missing:
                    uvdata.flag_array[blt_inds, 0, :, pol_ind] = True
                continue
            # baseline gain is g1 * conj(g2)
            gain = (
                uvcal_use.get_gains(uvcal_key1)
                * np.conj(uvcal_use.get_gains(uvcal_key2))
            ).T  # tranpose to match uvdata shape
            flag = (uvcal_use.get_flags(uvcal_key1) | uvcal_use.get_flags(uvcal_key2)).T
            # propagate flags
            if prop_flags:
                # don't use flagged (or zero) gains; mark those samples flagged
                mask = np.isclose(gain, 0.0) | flag
                gain[mask] = 1.0
                uvdata.flag_array[blt_inds, 0, :, pol_ind] += mask
            # apply to data
            # "multiply" gain convention applies gains by multiplication;
            # undo inverts the direction of the operation.
            mult_gains = uvcal_use.gain_convention == "multiply"
            if undo:
                mult_gains = not mult_gains
            if mult_gains:
                uvdata.data_array[blt_inds, 0, :, pol_ind] *= gain
            else:
                uvdata.data_array[blt_inds, 0, :, pol_ind] /= gain
    # update attributes
    uvdata.history += "\nCalibrated with pyuvdata.utils.uvcalibrate."
    if undo:
        uvdata.vis_units = "UNCALIB"
    else:
        if uvcal_use.gain_scale is not None:
            uvdata.vis_units = uvcal_use.gain_scale
    if not inplace:
        return uvdata
def apply_uvflag(
    uvd, uvf, inplace=True, unflag_first=False, flag_missing=True, force_pol=True
):
    """
    Apply flags from a UVFlag to a UVData instantiation.

    Note that if uvf.Nfreqs or uvf.Ntimes is 1, it will broadcast flags across
    that axis.

    Parameters
    ----------
    uvd : UVData object
        UVData object to add flags to.
    uvf : UVFlag object
        A UVFlag object in flag mode.
    inplace : bool
        If True overwrite flags in uvd, otherwise return new object
    unflag_first : bool
        If True, completely unflag the UVData before applying flags.
        Else, OR the inherent uvd flags with uvf flags.
    flag_missing : bool
        If input uvf is a baseline type and antpairs in uvd do not exist in uvf,
        flag them in uvd. Otherwise leave them untouched.
    force_pol : bool
        If True, broadcast flags to all polarizations if they do not match.
        Only works if uvf.Npols == 1.

    Returns
    -------
    UVData
        If not inplace, returns new UVData object with flags applied
    """
    if uvf.mode != "flag":
        raise ValueError("UVFlag must be flag mode")
    if not inplace:
        uvd = uvd.copy()
    # Always work on a copy of uvf: it is edited in place below.
    uvf = uvf.copy()
    if uvf.type != "baseline":
        # convert to baseline type (edits uvf in place)
        uvf.to_baseline(uvd, force_pol=force_pol)
    else:
        # polarizations must match, or be broadcastable via force_pol
        uvd_pols = uvd.polarization_array.tolist()
        uvf_pols = uvf.polarization_array.tolist()
        if set(uvd_pols) != set(uvf_pols):
            if uvf.Npols == 1 and force_pol:
                # single-pol uvf: broadcast to uvd's polarizations (in place)
                uvf.polarization_array = uvd.polarization_array
                uvf.Npols = len(uvf.polarization_array)
                uvf_pols = uvf.polarization_array.tolist()
            else:
                raise ValueError("Input uvf and uvd polarizations do not match")
        # reorder uvf polarizations to match uvd ordering (in place)
        uvf.polarization_array = uvf.polarization_array[
            [uvd_pols.index(pol) for pol in uvf_pols]
        ]
    # Time axes must agree unless uvf has a single time (implicit broadcast).
    if uvf.Ntimes == 1:
        times_mismatched = False
    elif uvf.Ntimes == uvd.Ntimes:
        time_diffs = np.unique(uvf.time_array) - np.unique(uvd.time_array)
        times_mismatched = np.any(time_diffs > np.max(np.abs(uvf._time_array.tols)))
    else:
        times_mismatched = True
    if times_mismatched:
        raise ValueError("UVFlag and UVData have mismatched time arrays.")
    # Same for the frequency axis.
    if uvf.Nfreqs == 1:
        freqs_mismatched = False
    elif uvf.Nfreqs == uvd.Nfreqs:
        freq_diffs = np.unique(uvf.freq_array) - np.unique(uvd.freq_array)
        freqs_mismatched = np.any(freq_diffs > np.max(np.abs(uvf._freq_array.tols)))
    else:
        freqs_mismatched = True
    if freqs_mismatched:
        raise ValueError("UVFlag and UVData have mismatched frequency arrays.")
    # unflag if desired
    if unflag_first:
        uvd.flag_array[:] = False
    # Apply flags per antpair. TODO: handle conjugated antpairs.
    uvf_antpairs = uvf.get_antpairs()
    for antpair in uvd.get_antpairs():
        uvd_inds = uvd.antpair2ind(antpair)
        if antpair not in uvf_antpairs:
            if flag_missing:
                uvd.flag_array[uvd_inds] = True
            continue
        # addition of boolean arrays is a logical OR
        uvd.flag_array[uvd_inds] += uvf.flag_array[uvf.antpair2ind(*antpair)]
    uvd.history += "\nFlagged with pyuvdata.utils.apply_uvflags."
    if not inplace:
        return uvd
def parse_ants(uv, ant_str, print_toggle=False, x_orientation=None):
    """
    Get antpair and polarization from parsing an aipy-style ant string.

    Used to support the select function. Generates two lists of antenna pair
    tuples and polarization indices based on parsing of the string ant_str.
    If no valid polarizations (pseudo-Stokes params, or combinations of [lr]
    or [xy]) or antenna numbers are found in ant_str, ant_pairs_nums and
    polarizations are returned as None.

    Parameters
    ----------
    uv : UVBase Object
        A UVBased object that supports the following functions and parameters:
        - get_ants
        - get_antpairs
        - get_pols
        These are used to construct the baseline ant_pair_nums
        and polarizations returned.
    ant_str : str
        String containing antenna information to parse. Can be 'all',
        'auto', 'cross', or combinations of antenna numbers and polarization
        indicators 'l' and 'r' or 'x' and 'y'. Minus signs can also be used
        in front of an antenna number or baseline to exclude it from being
        output in ant_pairs_nums. If ant_str has a minus sign as the first
        character, 'all,' will be appended to the beginning of the string.
        See the tutorial for examples of valid strings and their behavior.
    print_toggle : bool
        Boolean for printing parsed baselines for a visual user check.
    x_orientation : str, optional
        Orientation of the physical dipole corresponding to what is
        labelled as the x polarization ("east" or "north") to allow for
        converting from E/N strings. If input uv object has an `x_orientation`
        parameter and the input to this function is `None`, the value from the
        object will be used. Any input given to this function will override the
        value on the uv object. See corresponding parameter on UVData
        for more details.

    Returns
    -------
    ant_pairs_nums : list of tuples of int or None
        List of tuples containing the parsed pairs of antenna numbers, or
        None if ant_str is 'all' or a pseudo-Stokes polarizations.
    polarizations : list of int or None
        List of desired polarizations or None if ant_str does not contain a
        polarization specification.
    """
    # The uv object must be able to report its antennas, antenna pairs and
    # polarizations so parsed entries can be validated against it.
    required_attrs = ["get_ants", "get_antpairs", "get_pols"]
    if not all(hasattr(uv, attr) for attr in required_attrs):
        raise ValueError(
            "UVBased objects must have all the following attributes in order "
            f"to call 'parse_ants': {required_attrs}."
        )
    # An explicit x_orientation argument overrides the value on the uv object.
    if x_orientation is None and (
        hasattr(uv, "x_orientation") and uv.x_orientation is not None
    ):
        x_orientation = uv.x_orientation
    # One "antenna" token: either a parenthesized comma-separated list of
    # (optionally negated) antenna numbers with an optional pol letter,
    # e.g. "(1x,2y)", or a single such antenna, e.g. "-3l".
    ant_re = r"(\(((-?\d+[lrxy]?,?)+)\)|-?\d+[lrxy]?)"
    # One baseline token anchored at the start of the remaining string:
    # "ant_ant" (an explicit pair) or a lone antenna, optionally followed
    # by the comma separating it from the next token.
    bl_re = "(^(%s_%s|%s),?)" % (ant_re, ant_re, ant_re)
    str_pos = 0
    ant_pairs_nums = []
    polarizations = []
    ants_data = uv.get_ants()
    ant_pairs_data = uv.get_antpairs()
    pols_data = uv.get_pols()
    # Antennas / polarizations referenced in ant_str but absent from the uv
    # object; collected so each one is warned about only once at the end.
    warned_ants = []
    warned_pols = []
    if ant_str.startswith("-"):
        # A leading minus means "everything except ...": start from 'all'.
        ant_str = "all," + ant_str
    # Consume ant_str one comma-separated token at a time.
    while str_pos < len(ant_str):
        m = re.search(bl_re, ant_str[str_pos:])
        if m is None:
            # Not a baseline token: must be a keyword or pseudo-Stokes pol.
            if ant_str[str_pos:].upper().startswith("ALL"):
                if len(ant_str[str_pos:].split(",")) > 1:
                    # 'all' only needs the explicit pair list when further
                    # (exclusion) tokens follow it.
                    ant_pairs_nums = uv.get_antpairs()
            elif ant_str[str_pos:].upper().startswith("AUTO"):
                for pair in ant_pairs_data:
                    if pair[0] == pair[1] and pair not in ant_pairs_nums:
                        ant_pairs_nums.append(pair)
            elif ant_str[str_pos:].upper().startswith("CROSS"):
                for pair in ant_pairs_data:
                    if not (pair[0] == pair[1] or pair in ant_pairs_nums):
                        ant_pairs_nums.append(pair)
            elif ant_str[str_pos:].upper().startswith("PI"):
                polarizations.append(polstr2num("pI"))
            elif ant_str[str_pos:].upper().startswith("PQ"):
                polarizations.append(polstr2num("pQ"))
            elif ant_str[str_pos:].upper().startswith("PU"):
                polarizations.append(polstr2num("pU"))
            elif ant_str[str_pos:].upper().startswith("PV"):
                polarizations.append(polstr2num("pV"))
            else:
                raise ValueError("Unparsible argument {s}".format(s=ant_str))
            # Advance past this token: up to the next comma, or to the end.
            comma_cnt = ant_str[str_pos:].find(",")
            if comma_cnt >= 0:
                str_pos += comma_cnt + 1
            else:
                str_pos = len(ant_str)
        else:
            # Baseline token matched. Group layout of bl_re (0-indexed via
            # m.groups()): m[2]/m[3] are the first antenna (full match /
            # parenthesized list contents), m[5]/m[6] the second antenna,
            # and m[8] a lone antenna token with no '_'.
            m = m.groups()
            str_pos += len(m[0])
            if m[2] is None:
                # Lone antenna: pair it with every antenna in the data.
                ant_i_list = [m[8]]
                ant_j_list = list(uv.get_ants())
            else:
                if m[3] is None:
                    ant_i_list = [m[2]]
                else:
                    ant_i_list = m[3].split(",")
                if m[6] is None:
                    ant_j_list = [m[5]]
                else:
                    ant_j_list = m[6].split(",")
            for ant_i in ant_i_list:
                include_i = True
                if type(ant_i) == str and ant_i.startswith("-"):
                    ant_i = ant_i[1:]  # nibble the - off the string
                    include_i = False
                for ant_j in ant_j_list:
                    include_j = True
                    if type(ant_j) == str and ant_j.startswith("-"):
                        ant_j = ant_j[1:]
                        include_j = False
                    pols = None
                    ant_i, ant_j = str(ant_i), str(ant_j)
                    # Split a trailing pol letter off the antenna number,
                    # e.g. "1x" -> ("1", "x").
                    if not ant_i.isdigit():
                        ai = re.search(r"(\d+)([x,y,l,r])", ant_i).groups()
                    if not ant_j.isdigit():
                        aj = re.search(r"(\d+)([x,y,l,r])", ant_j).groups()
                    # Build the candidate pol strings from whichever side(s)
                    # carried a pol letter; a bare side expands to both feeds
                    # of the matching basis (x/y or l/r).
                    if ant_i.isdigit() and ant_j.isdigit():
                        ai = [ant_i, ""]
                        aj = [ant_j, ""]
                    elif ant_i.isdigit() and not ant_j.isdigit():
                        if "x" in ant_j or "y" in ant_j:
                            pols = ["x" + aj[1], "y" + aj[1]]
                        else:
                            pols = ["l" + aj[1], "r" + aj[1]]
                        ai = [ant_i, ""]
                    elif not ant_i.isdigit() and ant_j.isdigit():
                        if "x" in ant_i or "y" in ant_i:
                            pols = [ai[1] + "x", ai[1] + "y"]
                        else:
                            pols = [ai[1] + "l", ai[1] + "r"]
                        aj = [ant_j, ""]
                    elif not ant_i.isdigit() and not ant_j.isdigit():
                        pols = [ai[1] + aj[1]]
                    ant_tuple = (abs(int(ai[0])), abs(int(aj[0])))
                    # Order tuple according to order in object
                    if ant_tuple in ant_pairs_data:
                        pass
                    elif ant_tuple[::-1] in ant_pairs_data:
                        ant_tuple = ant_tuple[::-1]
                    else:
                        # Pair not present in the data at all: record any
                        # unknown antennas/pols for a warning, then skip it.
                        if not (
                            ant_tuple[0] in ants_data or ant_tuple[0] in warned_ants
                        ):
                            warned_ants.append(ant_tuple[0])
                        if not (
                            ant_tuple[1] in ants_data or ant_tuple[1] in warned_ants
                        ):
                            warned_ants.append(ant_tuple[1])
                        if pols is not None:
                            for pol in pols:
                                if not (pol.lower() in pols_data or pol in warned_pols):
                                    warned_pols.append(pol)
                        continue
                    if include_i and include_j:
                        # Inclusion: add the pair and any new polarizations.
                        if ant_tuple not in ant_pairs_nums:
                            ant_pairs_nums.append(ant_tuple)
                        if pols is not None:
                            for pol in pols:
                                if (
                                    pol.lower() in pols_data
                                    and polstr2num(pol, x_orientation=x_orientation)
                                    not in polarizations
                                ):
                                    polarizations.append(
                                        polstr2num(pol, x_orientation=x_orientation)
                                    )
                                elif not (
                                    pol.lower() in pols_data or pol in warned_pols
                                ):
                                    warned_pols.append(pol)
                    else:
                        # Exclusion (minus sign on either antenna).
                        if pols is not None:
                            for pol in pols:
                                if pol.lower() in pols_data:
                                    if uv.Npols == 1 and [pol.lower()] == pols_data:
                                        # Removing the only polarization in the
                                        # data removes the baseline entirely.
                                        ant_pairs_nums.remove(ant_tuple)
                                    if (
                                        polstr2num(pol, x_orientation=x_orientation)
                                        in polarizations
                                    ):
                                        polarizations.remove(
                                            polstr2num(
                                                pol, x_orientation=x_orientation,
                                            )
                                        )
                                elif not (
                                    pol.lower() in pols_data or pol in warned_pols
                                ):
                                    warned_pols.append(pol)
                        elif ant_tuple in ant_pairs_nums:
                            ant_pairs_nums.remove(ant_tuple)
    # Normalize empty results to None (the "no selection" sentinel).
    if ant_str.upper() == "ALL":
        ant_pairs_nums = None
    elif len(ant_pairs_nums) == 0:
        if not ant_str.upper() in ["AUTO", "CROSS"]:
            ant_pairs_nums = None
    if len(polarizations) == 0:
        polarizations = None
    else:
        polarizations.sort(reverse=True)
    if print_toggle:
        print("\nParsed antenna pairs:")
        if ant_pairs_nums is not None:
            for pair in ant_pairs_nums:
                print(pair)
        print("\nParsed polarizations:")
        if polarizations is not None:
            for pol in polarizations:
                print(polnum2str(pol, x_orientation=x_orientation))
    # Emit one consolidated warning per category for unknown entries.
    if len(warned_ants) > 0:
        warnings.warn(
            "Warning: Antenna number {a} passed, but not present "
            "in the ant_1_array or ant_2_array".format(
                a=(",").join(map(str, warned_ants))
            )
        )
    if len(warned_pols) > 0:
        warnings.warn(
            "Warning: Polarization {p} is not present in "
            "the polarization_array".format(p=(",").join(warned_pols).upper())
        )
    return ant_pairs_nums, polarizations
| [
"numpy.isin",
"numpy.sum",
"numpy.abs",
"numpy.allclose",
"numpy.isclose",
"numpy.mean",
"numpy.linalg.norm",
"scipy.spatial.distance.pdist",
"numpy.arange",
"numpy.sin",
"numpy.float64",
"astropy.coordinates.Angle",
"numpy.unique",
"numpy.zeros_like",
"numpy.true_divide",
"numpy.appen... | [((469, 723), 'warnings.warn', 'warnings.warn', (['"""_str_to_bytes is deprecated and will be removed in pyuvdata version 2.2. For an input string s, this function is a thin wrapper on s.encode(\'utf8\'). The use of encode is preferred over calling this function."""', 'DeprecationWarning'], {}), '(\n "_str_to_bytes is deprecated and will be removed in pyuvdata version 2.2. For an input string s, this function is a thin wrapper on s.encode(\'utf8\'). The use of encode is preferred over calling this function."\n , DeprecationWarning)\n', (482, 723), False, 'import warnings\n'), ((815, 1069), 'warnings.warn', 'warnings.warn', (['"""_bytes_to_str is deprecated and will be removed in pyuvdata version 2.2. For an input string s, this function is a thin wrapper on s.decode(\'utf8\'). The use of decode is preferred over calling this function."""', 'DeprecationWarning'], {}), '(\n "_bytes_to_str is deprecated and will be removed in pyuvdata version 2.2. For an input string s, this function is a thin wrapper on s.decode(\'utf8\'). 
The use of decode is preferred over calling this function."\n , DeprecationWarning)\n', (828, 1069), False, 'import warnings\n'), ((11696, 11727), 'copy.deepcopy', 'copy.deepcopy', (['POL_STR2NUM_DICT'], {}), '(POL_STR2NUM_DICT)\n', (11709, 11727), False, 'import copy\n'), ((13355, 13386), 'copy.deepcopy', 'copy.deepcopy', (['POL_NUM2STR_DICT'], {}), '(POL_NUM2STR_DICT)\n', (13368, 13386), False, 'import copy\n'), ((14865, 14898), 'copy.deepcopy', 'copy.deepcopy', (['JONES_STR2NUM_DICT'], {}), '(JONES_STR2NUM_DICT)\n', (14878, 14898), False, 'import copy\n'), ((16440, 16473), 'copy.deepcopy', 'copy.deepcopy', (['JONES_NUM2STR_DICT'], {}), '(JONES_NUM2STR_DICT)\n', (16453, 16473), False, 'import copy\n'), ((21918, 21931), 'numpy.array', 'np.array', (['xyz'], {}), '(xyz)\n', (21926, 21931), True, 'import numpy as np\n'), ((23353, 23401), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['latitude'], {'dtype': 'np.float64'}), '(latitude, dtype=np.float64)\n', (23373, 23401), True, 'import numpy as np\n'), ((23418, 23467), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['longitude'], {'dtype': 'np.float64'}), '(longitude, dtype=np.float64)\n', (23438, 23467), True, 'import numpy as np\n'), ((23483, 23531), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['altitude'], {'dtype': 'np.float64'}), '(altitude, dtype=np.float64)\n', (23503, 23531), True, 'import numpy as np\n'), ((26142, 26155), 'numpy.array', 'np.array', (['xyz'], {}), '(xyz)\n', (26150, 26155), True, 'import numpy as np\n'), ((26500, 26530), 'numpy.linalg.norm', 'np.linalg.norm', (['xyz_in'], {'axis': '(1)'}), '(xyz_in, axis=1)\n', (26514, 26530), True, 'import numpy as np\n'), ((27815, 27828), 'numpy.array', 'np.array', (['enu'], {}), '(enu)\n', (27823, 27828), True, 'import numpy as np\n'), ((30930, 30953), 'numpy.zeros_like', 'np.zeros_like', (['jd_array'], {}), '(jd_array)\n', (30943, 30953), True, 'import numpy as np\n'), ((30977, 31017), 'numpy.unique', 'np.unique', (['jd_array'], 
{'return_inverse': '(True)'}), '(jd_array, return_inverse=True)\n', (30986, 31017), True, 'import numpy as np\n'), ((32139, 32167), 'numpy.asarray', 'np.asarray', (['location_vectors'], {}), '(location_vectors)\n', (32149, 32167), True, 'import numpy as np\n'), ((32187, 32211), 'numpy.asarray', 'np.asarray', (['location_ids'], {}), '(location_ids)\n', (32197, 32211), True, 'import numpy as np\n'), ((34746, 34770), 'copy.copy', 'copy.copy', (['baseline_vecs'], {}), '(baseline_vecs)\n', (34755, 34770), False, 'import copy\n'), ((35634, 35657), 'numpy.zeros', 'np.zeros', (['(n_unique, 3)'], {}), '((n_unique, 3))\n', (35642, 35657), True, 'import numpy as np\n'), ((38029, 38042), 'numpy.array', 'np.array', (['bls'], {}), '(bls)\n', (38037, 38042), True, 'import numpy as np\n'), ((38057, 38074), 'numpy.array', 'np.array', (['bl_vecs'], {}), '(bl_vecs)\n', (38065, 38074), True, 'import numpy as np\n'), ((39253, 39271), 'copy.deepcopy', 'copy.deepcopy', (['arr'], {}), '(arr)\n', (39266, 39271), False, 'import copy\n'), ((39507, 39533), 'numpy.sum', 'np.sum', (['weights'], {'axis': 'axis'}), '(weights, axis=axis)\n', (39513, 39533), True, 'import numpy as np\n'), ((39675, 39707), 'numpy.sum', 'np.sum', (['(weights * arr)'], {'axis': 'axis'}), '(weights * arr, axis=axis)\n', (39681, 39707), True, 'import numpy as np\n'), ((39749, 39793), 'numpy.true_divide', 'np.true_divide', (['out', 'weight_out'], {'where': 'where'}), '(out, weight_out, where=where)\n', (39763, 39793), True, 'import numpy as np\n'), ((39804, 39832), 'numpy.where', 'np.where', (['where', 'out', 'np.inf'], {}), '(where, out, np.inf)\n', (39812, 39832), True, 'import numpy as np\n'), ((42837, 42859), 'numpy.any', 'np.any', (['arr'], {'axis': 'axis'}), '(arr, axis=axis)\n', (42843, 42859), True, 'import numpy as np\n'), ((43936, 43958), 'numpy.all', 'np.all', (['arr'], {'axis': 'axis'}), '(arr, axis=axis)\n', (43942, 43958), True, 'import numpy as np\n'), ((48113, 48145), 'numpy.asarray', 'np.asarray', 
(['uvdata.antenna_names'], {}), '(uvdata.antenna_names)\n', (48123, 48145), True, 'import numpy as np\n'), ((48358, 48384), 'numpy.unique', 'np.unique', (['uvcal.ant_array'], {}), '(uvcal.ant_array)\n', (48367, 48384), True, 'import numpy as np\n'), ((48411, 48442), 'numpy.asarray', 'np.asarray', (['uvcal.antenna_names'], {}), '(uvcal.antenna_names)\n', (48421, 48442), True, 'import numpy as np\n'), ((55339, 55367), 'numpy.unique', 'np.unique', (['uvdata.time_array'], {}), '(uvdata.time_array)\n', (55348, 55367), True, 'import numpy as np\n'), ((6503, 6556), 'numpy.any', 'np.any', (['[(sub in key) for sub in std_fits_substrings]'], {}), '([(sub in key) for sub in std_fits_substrings])\n', (6509, 6556), True, 'import numpy as np\n'), ((8767, 8813), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['baseline'], {'dtype': 'np.int64'}), '(baseline, dtype=np.int64)\n', (8787, 8813), True, 'import numpy as np\n'), ((9951, 9993), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['ant1'], {'dtype': 'np.int64'}), '(ant1, dtype=np.int64)\n', (9971, 9993), True, 'import numpy as np\n'), ((10003, 10045), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['ant2'], {'dtype': 'np.int64'}), '(ant2, dtype=np.int64)\n', (10023, 10045), True, 'import numpy as np\n'), ((22517, 22564), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['xyz_use'], {'dtype': 'np.float64'}), '(xyz_use, dtype=np.float64)\n', (22537, 22564), True, 'import numpy as np\n'), ((26583, 26634), 'numpy.any', 'np.any', (['(ecef_magnitudes <= sensible_radius_range[0])'], {}), '(ecef_magnitudes <= sensible_radius_range[0])\n', (26589, 26634), True, 'import numpy as np\n'), ((26638, 26689), 'numpy.any', 'np.any', (['(ecef_magnitudes >= sensible_radius_range[1])'], {}), '(ecef_magnitudes >= sensible_radius_range[1])\n', (26644, 26689), True, 'import numpy as np\n'), ((26868, 26914), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['xyz_in'], {'dtype': 'np.float64'}), '(xyz_in, dtype=np.float64)\n', 
(26888, 26914), True, 'import numpy as np\n'), ((26924, 26972), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['latitude'], {'dtype': 'np.float64'}), '(latitude, dtype=np.float64)\n', (26944, 26972), True, 'import numpy as np\n'), ((26982, 27031), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['longitude'], {'dtype': 'np.float64'}), '(longitude, dtype=np.float64)\n', (27002, 27031), True, 'import numpy as np\n'), ((27041, 27089), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['altitude'], {'dtype': 'np.float64'}), '(altitude, dtype=np.float64)\n', (27061, 27089), True, 'import numpy as np\n'), ((27139, 27154), 'numpy.squeeze', 'np.squeeze', (['enu'], {}), '(enu)\n', (27149, 27154), True, 'import numpy as np\n'), ((28078, 28125), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['enu_use'], {'dtype': 'np.float64'}), '(enu_use, dtype=np.float64)\n', (28098, 28125), True, 'import numpy as np\n'), ((28135, 28183), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['latitude'], {'dtype': 'np.float64'}), '(latitude, dtype=np.float64)\n', (28155, 28183), True, 'import numpy as np\n'), ((28193, 28242), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['longitude'], {'dtype': 'np.float64'}), '(longitude, dtype=np.float64)\n', (28213, 28242), True, 'import numpy as np\n'), ((28252, 28300), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['altitude'], {'dtype': 'np.float64'}), '(altitude, dtype=np.float64)\n', (28272, 28300), True, 'import numpy as np\n'), ((28351, 28366), 'numpy.squeeze', 'np.squeeze', (['xyz'], {}), '(xyz)\n', (28361, 28366), True, 'import numpy as np\n'), ((29359, 29373), 'numpy.float64', 'np.float64', (['ra'], {}), '(ra)\n', (29369, 29373), True, 'import numpy as np\n'), ((29383, 29398), 'numpy.float64', 'np.float64', (['dec'], {}), '(dec)\n', (29393, 29398), True, 'import numpy as np\n'), ((29408, 29459), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['initial_uvw'], {'dtype': 'np.float64'}), '(initial_uvw, 
dtype=np.float64)\n', (29428, 29459), True, 'import numpy as np\n'), ((30257, 30271), 'numpy.float64', 'np.float64', (['ra'], {}), '(ra)\n', (30267, 30271), True, 'import numpy as np\n'), ((30273, 30288), 'numpy.float64', 'np.float64', (['dec'], {}), '(dec)\n', (30283, 30288), True, 'import numpy as np\n'), ((30290, 30333), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['uvw'], {'dtype': 'np.float64'}), '(uvw, dtype=np.float64)\n', (30310, 30333), True, 'import numpy as np\n'), ((32407, 32430), 'scipy.spatial.distance.pdist', 'pdist', (['location_vectors'], {}), '(location_vectors)\n', (32412, 32430), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((32504, 32528), 'scipy.spatial.distance.squareform', 'squareform', (['adj_triu_mat'], {}), '(adj_triu_mat)\n', (32514, 32528), False, 'from scipy.spatial.distance import pdist, squareform\n'), ((35192, 35224), 'numpy.array', 'np.array', (['conjugates'], {'dtype': 'bool'}), '(conjugates, dtype=bool)\n', (35200, 35224), True, 'import numpy as np\n'), ((35782, 35821), 'numpy.mean', 'np.mean', (['baseline_vecs[inds, :]'], {'axis': '(0)'}), '(baseline_vecs[inds, :], axis=0)\n', (35789, 35821), True, 'import numpy as np\n'), ((35842, 35878), 'numpy.sum', 'np.sum', (['(vec_bin_centers ** 2)'], {'axis': '(1)'}), '(vec_bin_centers ** 2, axis=1)\n', (35848, 35878), True, 'import numpy as np\n'), ((39340, 39357), 'numpy.ones_like', 'np.ones_like', (['arr'], {}), '(arr)\n', (39352, 39357), True, 'import numpy as np\n'), ((39386, 39408), 'copy.deepcopy', 'copy.deepcopy', (['weights'], {}), '(weights)\n', (39399, 39408), False, 'import copy\n'), ((39471, 39484), 'numpy.isinf', 'np.isinf', (['arr'], {}), '(arr)\n', (39479, 39484), True, 'import numpy as np\n'), ((39631, 39664), 'numpy.sum', 'np.sum', (['weights_square'], {'axis': 'axis'}), '(weights_square, axis=axis)\n', (39637, 39664), True, 'import numpy as np\n'), ((40799, 40810), 'numpy.abs', 'np.abs', (['arr'], {}), '(arr)\n', (40805, 40810), True, 
'import numpy as np\n'), ((42948, 43026), 'warnings.warn', 'warnings.warn', (['"""Currently weights are not handled when OR-ing boolean arrays."""'], {}), "('Currently weights are not handled when OR-ing boolean arrays.')\n", (42961, 43026), False, 'import warnings\n'), ((44047, 44126), 'warnings.warn', 'warnings.warn', (['"""Currently weights are not handled when AND-ing boolean arrays."""'], {}), "('Currently weights are not handled when AND-ing boolean arrays.')\n", (44060, 44126), False, 'import warnings\n'), ((48035, 48084), 'numpy.append', 'np.append', (['uvdata.ant_1_array', 'uvdata.ant_2_array'], {}), '(uvdata.ant_1_array, uvdata.ant_2_array)\n', (48044, 48084), True, 'import numpy as np\n'), ((55674, 55701), 'numpy.unique', 'np.unique', (['uvcal.time_array'], {}), '(uvcal.time_array)\n', (55683, 55701), True, 'import numpy as np\n'), ((72479, 72514), 're.search', 're.search', (['bl_re', 'ant_str[str_pos:]'], {}), '(bl_re, ant_str[str_pos:])\n', (72488, 72514), False, 'import re\n'), ((31311, 31386), 'numpy.isin', 'np.isin', (['status', '(iers.TIME_BEFORE_IERS_RANGE, iers.TIME_BEYOND_IERS_RANGE)'], {}), '(status, (iers.TIME_BEFORE_IERS_RANGE, iers.TIME_BEYOND_IERS_RANGE))\n', (31318, 31386), True, 'import numpy as np\n'), ((31410, 31502), 'warnings.warn', 'warnings.warn', (['"""time is out of IERS range, setting delta ut1 utc to extrapolated value"""'], {}), "(\n 'time is out of IERS range, setting delta ut1 utc to extrapolated value')\n", (31423, 31502), False, 'import warnings\n'), ((33441, 33467), 'numpy.unique', 'np.unique', (['loc_gps'], {'axis': '(0)'}), '(loc_gps, axis=0)\n', (33450, 33467), True, 'import numpy as np\n'), ((34938, 34970), 'numpy.isclose', 'np.isclose', (['bv[0]', '(0.0)'], {'atol': 'tol'}), '(bv[0], 0.0, atol=tol)\n', (34948, 34970), True, 'import numpy as np\n'), ((35022, 35054), 'numpy.isclose', 'np.isclose', (['bv[1]', '(0.0)'], {'atol': 'tol'}), '(bv[1], 0.0, atol=tol)\n', (35032, 35054), True, 'import numpy as np\n'), ((39448, 
39461), 'numpy.isinf', 'np.isinf', (['arr'], {}), '(arr)\n', (39456, 39461), True, 'import numpy as np\n'), ((41655, 41666), 'numpy.abs', 'np.abs', (['arr'], {}), '(arr)\n', (41661, 41666), True, 'import numpy as np\n'), ((41879, 41894), 'numpy.sqrt', 'np.sqrt', (['out[0]'], {}), '(out[0])\n', (41886, 41894), True, 'import numpy as np\n'), ((42025, 42037), 'numpy.sqrt', 'np.sqrt', (['out'], {}), '(out)\n', (42032, 42037), True, 'import numpy as np\n'), ((43070, 43103), 'numpy.ones_like', 'np.ones_like', (['out'], {'dtype': 'np.float'}), '(out, dtype=np.float)\n', (43082, 43103), True, 'import numpy as np\n'), ((44170, 44203), 'numpy.ones_like', 'np.ones_like', (['out'], {'dtype': 'np.float'}), '(out, dtype=np.float)\n', (44182, 44203), True, 'import numpy as np\n'), ((48872, 48920), 'numpy.nonzero', 'np.nonzero', (['(uvcal_used_antnames == this_ant_name)'], {}), '(uvcal_used_antnames == this_ant_name)\n', (48882, 48920), True, 'import numpy as np\n'), ((55744, 55852), 'numpy.allclose', 'np.allclose', (['uvcal_times', 'uvdata_times'], {'atol': 'uvdata._time_array.tols[1]', 'rtol': 'uvdata._time_array.tols[0]'}), '(uvcal_times, uvdata_times, atol=uvdata._time_array.tols[1],\n rtol=uvdata._time_array.tols[0])\n', (55755, 55852), True, 'import numpy as np\n'), ((58817, 58848), 'numpy.sort', 'np.sort', (['uvcal.freq_array[0, :]'], {}), '(uvcal.freq_array[0, :])\n', (58824, 58848), True, 'import numpy as np\n'), ((58862, 58894), 'numpy.sort', 'np.sort', (['uvdata.freq_array[0, :]'], {}), '(uvdata.freq_array[0, :])\n', (58869, 58894), True, 'import numpy as np\n'), ((60438, 60586), 'warnings.warn', 'warnings.warn', (['f"""Feed polarization {feed} exists on UVData but not on UVCal. This will become an error in version 2.2"""', 'DeprecationWarning'], {}), "(\n f'Feed polarization {feed} exists on UVData but not on UVCal. 
This will become an error in version 2.2'\n , DeprecationWarning)\n", (60451, 60586), False, 'import warnings\n'), ((4582, 4601), 'numpy.arange', 'np.arange', (['axis_num'], {}), '(axis_num)\n', (4591, 4601), True, 'import numpy as np\n'), ((12060, 12106), 'warnings.warn', 'warnings.warn', (['"""x_orientation not recognized."""'], {}), "('x_orientation not recognized.')\n", (12073, 12106), False, 'import warnings\n'), ((13719, 13765), 'warnings.warn', 'warnings.warn', (['"""x_orientation not recognized."""'], {}), "('x_orientation not recognized.')\n", (13732, 13765), False, 'import warnings\n'), ((15233, 15279), 'warnings.warn', 'warnings.warn', (['"""x_orientation not recognized."""'], {}), "('x_orientation not recognized.')\n", (15246, 15279), False, 'import warnings\n'), ((16808, 16854), 'warnings.warn', 'warnings.warn', (['"""x_orientation not recognized."""'], {}), "('x_orientation not recognized.')\n", (16821, 16854), False, 'import warnings\n'), ((24607, 24620), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (24613, 24620), True, 'import numpy as np\n'), ((24659, 24672), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (24665, 24672), True, 'import numpy as np\n'), ((24674, 24687), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (24680, 24687), True, 'import numpy as np\n'), ((25346, 25359), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (25352, 25359), True, 'import numpy as np\n'), ((25398, 25411), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (25404, 25411), True, 'import numpy as np\n'), ((25413, 25426), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (25419, 25426), True, 'import numpy as np\n'), ((31087, 31115), 'astropy.coordinates.Angle', 'Angle', (['longitude'], {'unit': '"""deg"""'}), "(longitude, unit='deg')\n", (31092, 31115), False, 'from astropy.coordinates import Angle\n'), ((31117, 31144), 'astropy.coordinates.Angle', 'Angle', (['latitude'], {'unit': '"""deg"""'}), "(latitude, unit='deg')\n", (31122, 31144), 
False, 'from astropy.coordinates import Angle\n'), ((35711, 35735), 'numpy.where', 'np.where', (['(i == baselines)'], {}), '(i == baselines)\n', (35719, 35735), True, 'import numpy as np\n'), ((41976, 41991), 'numpy.sqrt', 'np.sqrt', (['out[0]'], {}), '(out[0])\n', (41983, 41991), True, 'import numpy as np\n'), ((59253, 59366), 'numpy.isclose', 'np.isclose', (['(uvcal.freq_array - this_freq)', '(0)'], {'atol': 'uvdata._freq_array.tols[1]', 'rtol': 'uvdata._freq_array.tols[0]'}), '(uvcal.freq_array - this_freq, 0, atol=uvdata._freq_array.tols[1],\n rtol=uvdata._freq_array.tols[0])\n', (59263, 59366), True, 'import numpy as np\n'), ((59631, 59776), 'warnings.warn', 'warnings.warn', (['f"""Frequency {this_freq} exists on UVData but not on UVCal. This will become an error in version 2.2"""', 'DeprecationWarning'], {}), "(\n f'Frequency {this_freq} exists on UVData but not on UVCal. This will become an error in version 2.2'\n , DeprecationWarning)\n", (59644, 59776), False, 'import warnings\n'), ((67946, 67971), 'numpy.unique', 'np.unique', (['uvf.time_array'], {}), '(uvf.time_array)\n', (67955, 67971), True, 'import numpy as np\n'), ((67974, 67999), 'numpy.unique', 'np.unique', (['uvd.time_array'], {}), '(uvd.time_array)\n', (67983, 67999), True, 'import numpy as np\n'), ((68323, 68348), 'numpy.unique', 'np.unique', (['uvf.freq_array'], {}), '(uvf.freq_array)\n', (68332, 68348), True, 'import numpy as np\n'), ((68351, 68376), 'numpy.unique', 'np.unique', (['uvd.freq_array'], {}), '(uvd.freq_array)\n', (68360, 68376), True, 'import numpy as np\n'), ((20965, 20985), 'numpy.where', 'np.where', (['(cpols == p)'], {}), '(cpols == p)\n', (20973, 20985), True, 'import numpy as np\n'), ((22239, 22270), 'numpy.linalg.norm', 'np.linalg.norm', (['xyz_use'], {'axis': '(1)'}), '(xyz_use, axis=1)\n', (22253, 22270), True, 'import numpy as np\n'), ((22304, 22335), 'numpy.linalg.norm', 'np.linalg.norm', (['xyz_use'], {'axis': '(1)'}), '(xyz_use, axis=1)\n', (22318, 22335), True, 
'import numpy as np\n'), ((24627, 24640), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (24633, 24640), True, 'import numpy as np\n'), ((25366, 25379), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (25372, 25379), True, 'import numpy as np\n'), ((48226, 48268), 'numpy.where', 'np.where', (['(uvdata.antenna_numbers == antnum)'], {}), '(uvdata.antenna_numbers == antnum)\n', (48234, 48268), True, 'import numpy as np\n'), ((48521, 48562), 'numpy.where', 'np.where', (['(uvcal.antenna_numbers == antnum)'], {}), '(uvcal.antenna_numbers == antnum)\n', (48529, 48562), True, 'import numpy as np\n'), ((49273, 49440), 'warnings.warn', 'warnings.warn', (['"""All antenna names with data on UVData are missing on UVCal. Since ant_check is False, calibration will proceed but all data will be flagged."""'], {}), "(\n 'All antenna names with data on UVData are missing on UVCal. Since ant_check is False, calibration will proceed but all data will be flagged.'\n )\n", (49286, 49440), False, 'import warnings\n'), ((53511, 53708), 'warnings.warn', 'warnings.warn', (['f"""Antennas {name_missing} have data on UVData but are missing on UVCal. Since ant_check is False, calibration will proceed and the data for these antennas will be flagged."""'], {}), "(\n f'Antennas {name_missing} have data on UVData but are missing on UVCal. Since ant_check is False, calibration will proceed and the data for these antennas will be flagged.'\n )\n", (53524, 53708), False, 'import warnings\n'), ((56201, 56314), 'numpy.isclose', 'np.isclose', (['(uvcal.time_array - this_time)', '(0)'], {'atol': 'uvdata._time_array.tols[1]', 'rtol': 'uvdata._time_array.tols[0]'}), '(uvcal.time_array - this_time, 0, atol=uvdata._time_array.tols[1],\n rtol=uvdata._time_array.tols[0])\n', (56211, 56314), True, 'import numpy as np\n'), ((56619, 56759), 'warnings.warn', 'warnings.warn', (['f"""Time {this_time} exists on UVData but not on UVCal. 
This will become an error in version 2.2"""', 'DeprecationWarning'], {}), "(\n f'Time {this_time} exists on UVData but not on UVCal. This will become an error in version 2.2'\n , DeprecationWarning)\n", (56632, 56759), False, 'import warnings\n'), ((57184, 57296), 'numpy.isclose', 'np.isclose', (['uvdata_times', 'uvcal.time_array'], {'atol': 'uvdata._time_array.tols[1]', 'rtol': 'uvdata._time_array.tols[0]'}), '(uvdata_times, uvcal.time_array, atol=uvdata._time_array.tols[1],\n rtol=uvdata._time_array.tols[0])\n', (57194, 57296), True, 'import numpy as np\n'), ((57400, 57534), 'warnings.warn', 'warnings.warn', (['"""Times do not match between UVData and UVCal but time_check is False, so calibration will be applied anyway."""'], {}), "(\n 'Times do not match between UVData and UVCal but time_check is False, so calibration will be applied anyway.'\n )\n", (57413, 57534), False, 'import warnings\n'), ((57643, 57825), 'warnings.warn', 'warnings.warn', (['"""Times do not match between UVData and UVCal. Set time_check=False to apply calibration anyway. This will become an error in version 2.2"""', 'DeprecationWarning'], {}), "(\n 'Times do not match between UVData and UVCal. Set time_check=False to apply calibration anyway. 
This will become an error in version 2.2'\n , DeprecationWarning)\n", (57656, 57825), False, 'import warnings\n'), ((58045, 58065), 'numpy.min', 'np.min', (['uvdata_times'], {}), '(uvdata_times)\n', (58051, 58065), True, 'import numpy as np\n'), ((58103, 58123), 'numpy.max', 'np.max', (['uvdata_times'], {}), '(uvdata_times)\n', (58109, 58123), True, 'import numpy as np\n'), ((58204, 58338), 'warnings.warn', 'warnings.warn', (['"""Times do not match between UVData and UVCal but time_check is False, so calibration will be applied anyway."""'], {}), "(\n 'Times do not match between UVData and UVCal but time_check is False, so calibration will be applied anyway.'\n )\n", (58217, 58338), False, 'import warnings\n'), ((58447, 58629), 'warnings.warn', 'warnings.warn', (['"""Times do not match between UVData and UVCal. Set time_check=False to apply calibration anyway. This will become an error in version 2.2"""', 'DeprecationWarning'], {}), "(\n 'Times do not match between UVData and UVCal. Set time_check=False to apply calibration anyway. This will become an error in version 2.2'\n , DeprecationWarning)\n", (58460, 58629), False, 'import warnings\n'), ((64694, 64715), 'numpy.isclose', 'np.isclose', (['gain', '(0.0)'], {}), '(gain, 0.0)\n', (64704, 64715), True, 'import numpy as np\n'), ((50399, 50704), 'warnings.warn', 'warnings.warn', (['"""All antenna names with data on UVData are missing on UVCal. They do all have matching antenna numbers on UVCal. Currently the data will be calibrated using the matching antenna number, but that will be deprecated in version 2.2 and this will become an error."""', 'DeprecationWarning'], {}), "(\n 'All antenna names with data on UVData are missing on UVCal. They do all have matching antenna numbers on UVCal. 
Currently the data will be calibrated using the matching antenna number, but that will be deprecated in version 2.2 and this will become an error.'\n , DeprecationWarning)\n", (50412, 50704), False, 'import warnings\n'), ((54016, 54373), 'warnings.warn', 'warnings.warn', (['f"""Antennas {name_missing} have data on UVData but are missing on UVCal. Currently calibration will proceed and since flag_missing is True, the data for these antennas will be flagged. This will become an error in version 2.2, to continue calibration and flag missing antennas in the future, set ant_check=False."""', 'DeprecationWarning'], {}), "(\n f'Antennas {name_missing} have data on UVData but are missing on UVCal. Currently calibration will proceed and since flag_missing is True, the data for these antennas will be flagged. This will become an error in version 2.2, to continue calibration and flag missing antennas in the future, set ant_check=False.'\n , DeprecationWarning)\n", (54029, 54373), False, 'import warnings\n'), ((54683, 55059), 'warnings.warn', 'warnings.warn', (['f"""Antennas {name_missing} have data on UVData but are missing on UVCal. Currently calibration will proceed and since flag_missing is False, the data for these antennas will not be calibrated or flagged. This will become an error in version 2.2, to continue calibration and flag missing antennas in the future, set ant_check=False."""', 'DeprecationWarning'], {}), "(\n f'Antennas {name_missing} have data on UVData but are missing on UVCal. Currently calibration will proceed and since flag_missing is False, the data for these antennas will not be calibrated or flagged. 
This will become an error in version 2.2, to continue calibration and flag missing antennas in the future, set ant_check=False.'\n , DeprecationWarning)\n", (54696, 55059), False, 'import warnings\n'), ((68047, 68075), 'numpy.abs', 'np.abs', (['uvf._time_array.tols'], {}), '(uvf._time_array.tols)\n', (68053, 68075), True, 'import numpy as np\n'), ((68424, 68452), 'numpy.abs', 'np.abs', (['uvf._freq_array.tols'], {}), '(uvf._freq_array.tols)\n', (68430, 68452), True, 'import numpy as np\n'), ((51331, 51667), 'warnings.warn', 'warnings.warn', (['f"""Antennas {only_name_missing} have data on UVData but are missing on UVCal. They do have matching antenna numbers on UVCal. Currently the data for these antennas will be calibrated using the matching antenna number, but that will be deprecated in version 2.2 and this will become an error."""', 'DeprecationWarning'], {}), "(\n f'Antennas {only_name_missing} have data on UVData but are missing on UVCal. They do have matching antenna numbers on UVCal. Currently the data for these antennas will be calibrated using the matching antenna number, but that will be deprecated in version 2.2 and this will become an error.'\n , DeprecationWarning)\n", (51344, 51667), False, 'import warnings\n'), ((51973, 52330), 'warnings.warn', 'warnings.warn', (['f"""Antennas {both_missing} have data on UVData but are missing on UVCal. Currently calibration will proceed and since flag_missing is True, the data for these antennas will be flagged. This will become an error in version 2.2, to continue calibration and flag missing antennas in the future, set ant_check=False."""', 'DeprecationWarning'], {}), "(\n f'Antennas {both_missing} have data on UVData but are missing on UVCal. Currently calibration will proceed and since flag_missing is True, the data for these antennas will be flagged. 
This will become an error in version 2.2, to continue calibration and flag missing antennas in the future, set ant_check=False.'\n , DeprecationWarning)\n", (51986, 52330), False, 'import warnings\n'), ((52684, 53060), 'warnings.warn', 'warnings.warn', (['f"""Antennas {both_missing} have data on UVData but are missing on UVCal. Currently calibration will proceed and since flag_missing is False, the data for these antennas will not be calibrated or flagged. This will become an error in version 2.2, to continue calibration and flag missing antennas in the future, set ant_check=False."""', 'DeprecationWarning'], {}), "(\n f'Antennas {both_missing} have data on UVData but are missing on UVCal. Currently calibration will proceed and since flag_missing is False, the data for these antennas will not be calibrated or flagged. This will become an error in version 2.2, to continue calibration and flag missing antennas in the future, set ant_check=False.'\n , DeprecationWarning)\n", (52697, 53060), False, 'import warnings\n'), ((75054, 75091), 're.search', 're.search', (['"""(\\\\d+)([x,y,l,r])"""', 'ant_i'], {}), "('(\\\\d+)([x,y,l,r])', ant_i)\n", (75063, 75091), False, 'import re\n'), ((75175, 75212), 're.search', 're.search', (['"""(\\\\d+)([x,y,l,r])"""', 'ant_j'], {}), "('(\\\\d+)([x,y,l,r])', ant_j)\n", (75184, 75212), False, 'import re\n'), ((50007, 50054), 'numpy.where', 'np.where', (['(uvdata.antenna_names == this_ant_name)'], {}), '(uvdata.antenna_names == this_ant_name)\n', (50015, 50054), True, 'import numpy as np\n')] |
import gurobipy as gb
import pandas as pd
import numpy as np
from benders_stochastic_subproblem import Benders_Subproblem
####
# Benders' decomposition, stochastic version
# Generators' production are set day ahead.
# Subproblems find costs associated with that setting
# depending on which demand scenario occurs.
####
# Class which can have attributes set
class expando(object):
    """Empty namespace object; attributes are attached dynamically at runtime."""
    pass
class Benders_Master:
    """
    Master problem of the Benders-decomposed stochastic dispatch problem.

    The master decides day-ahead generator production; per-scenario
    subproblems (Benders_Subproblem) evaluate the recourse cost of that
    setting for each demand scenario and feed optimality cuts back on the
    proxy variable alpha.
    """
    def __init__(self, max_iters=25, verbose=True, numscenarios=100, demand_avg=200.0, demand_std=20.0, epsilon=0.001, delta=0.001):
        '''
        Class which solves the benders decomposed version of the dispatch problem.

        Parameters
        ----------
        max_iters: int, default 25
            Maximum number of Benders iterations to run.
        verbose: boolean, default True
            Print information on upper and lower bounds for each iteration
        numscenarios: int, default 100
            Number of scenarios to use for subproblems
        demand_avg: float, default 200.0
            Average demand, used as day-ahead bid.
        demand_std: float, default 20.0
            Standard deviation for demand in scenario generation.
        epsilon: float, default 0.001
            Relative threshold for benders iterations.
            Iterations will stop if ub - lb > |epsilon * lb|
        delta: float, default 0.001
            Absolute threshold for benders iterations.
            Iterations will stop if ub < lb + delta
        '''
        self.data = expando()
        self.variables = expando()
        self.constraints = expando()
        self.results = expando()
        self.params = expando()

        self.params.max_iters = max_iters
        self.params.verbose = verbose
        self.params.numscenarios = numscenarios
        self.params.demand_avg = demand_avg
        self.params.demand_std = demand_std

        self._init_benders_params(epsilon=epsilon, delta=delta)
        self._load_data()
        self._build_model()

    def optimize(self, force_submodel_rebuild=False):
        """
        Run the Benders loop: solve the master, solve all scenario
        subproblems, then add cuts until the upper and lower bounds meet
        the absolute (delta) and relative (epsilon) tolerances, or until
        max_iters cuts have been generated.
        """
        # Initial master solution
        self.model.optimize()

        # Only build submodels if they don't exist or a rebuild is forced.
        if not hasattr(self, 'submodels') or force_submodel_rebuild:
            self.submodels = {s: Benders_Subproblem(self, scenario=s) for s in self.data.scenarios}

        # Update fixed variables for submodels and re-solve.
        # NOTE: dict.values() (rather than the Python-2-only itervalues())
        # keeps this runnable on both Python 2 and Python 3.
        for sm in self.submodels.values():
            sm.update_fixed_vars(self)
        for sm in self.submodels.values():
            sm.optimize()

        # Update bounds based on submodel solve
        self._update_bounds()
        self._save_vars()

        # Build cuts until we reach absolute and relative tolerance,
        # or max_iters cuts have been generated.
        while (
                (self.data.ub > self.data.lb + self.data.delta or
                 self.data.ub - self.data.lb > abs(self.data.epsilon * self.data.lb)) and
                len(self.data.cutlist) < self.params.max_iters):
            if self.params.verbose:
                print('********')
                print('* Benders\' step {0}:'.format(len(self.data.upper_bounds)))
                print('* Upper bound: {0}'.format(self.data.ub))
                print('* Lower bound: {0}'.format(self.data.lb))
                print('********')
            # Generate new cut.
            self._do_benders_step()

    def _do_benders_step(self):
        """Add one Benders cut, then re-solve the master and all subproblems."""
        self._add_cut()
        self._start_from_previous()
        self.model.optimize()
        for sm in self.submodels.values():
            sm.update_fixed_vars(self)
        for sm in self.submodels.values():
            sm.optimize()
        self._update_bounds()
        self._save_vars()

    def _init_benders_params(self, epsilon=0.001, delta=0.001):
        """Reset all bookkeeping for cuts, bounds and convergence tolerances."""
        self.data.cutlist = []
        self.data.upper_bounds = []
        self.data.lower_bounds = []
        self.data.mipgap = []
        self.data.solvetime = []
        self.data.alphas = []
        self.data.lambdas = {}
        self.data.epsilon = epsilon
        self.data.delta = delta
        self.data.ub = gb.GRB.INFINITY
        self.data.lb = -gb.GRB.INFINITY

    ###
    # Data Loading
    ###
    def _load_data(self):
        self._load_generator_data()
        self._load_demand_data()

    def _load_generator_data(self):
        """Read generator data (including prices and capacities) from CSV."""
        self.data.geninfo = pd.read_csv('benders_stochastic_gens.csv', index_col='gen', skipinitialspace=True)
        self.data.generators = self.data.geninfo.index

    def _load_demand_data(self):
        """Draw normally-distributed demand scenarios around the day-ahead average."""
        self.data.VOLL = 1000
        self.data.demand_da = self.params.demand_avg
        # NOTE: range() (rather than the Python-2-only xrange()) keeps this
        # runnable on Python 3.
        self.data.scenarios = ['s'+str(i) for i in range(self.params.numscenarios)]
        self.data.demand_rt = pd.Series(
            data=np.random.normal(self.params.demand_avg, self.params.demand_std, size=self.params.numscenarios),
            index=self.data.scenarios)
        # Scenarios are equiprobable.
        self.data.scenarioprobs = {s: 1.0/self.params.numscenarios for s in self.data.scenarios}
        # Dump load
        self.data.dumploadprice = 10
        self.data.dumploadmax = self.data.demand_da

    ###
    # Model Building
    ###
    def _build_model(self):
        self.model = gb.Model()
        self._build_variables()
        self._build_objective()
        self._build_constraints()
        self.model.update()

    def _build_variables(self):
        """Create day-ahead production and load variables plus the Benders
        proxy variable alpha."""
        m = self.model

        gens = self.data.generators
        geninfo = self.data.geninfo

        self.variables.gprod_da = {}
        for g in gens:
            self.variables.gprod_da[g] = m.addVar(lb=0, ub=geninfo.maxprod[g])
        self.variables.load_da = m.addVar(lb=0, ub=self.data.demand_da)

        # Benders' proxy variable
        self.variables.alpha = m.addVar(lb=-self.data.demand_da*self.data.VOLL, ub=gb.GRB.INFINITY)

        m.update()

    def _build_objective(self):
        """Day-ahead generation cost minus load value, plus the recourse proxy."""
        m = self.model
        gens = self.data.generators
        geninfo = self.data.geninfo

        self.objective = m.setObjective(
            gb.quicksum(geninfo.price[g] * self.variables.gprod_da[g] for g in gens) -
            self.data.VOLL*self.variables.load_da +
            self.variables.alpha)

    def _build_constraints(self):
        """Day-ahead power balance; the cut container starts empty."""
        m = self.model
        gens = self.data.generators

        self.constraints.powerbalance_da = m.addConstr(
            gb.quicksum(self.variables.gprod_da[g] for g in gens),
            gb.GRB.EQUAL,
            self.variables.load_da)

        self.constraints.cuts = {}

    def _add_cut(self):
        """Add one optimality cut built from probability-weighted subproblem duals."""
        gens = self.data.generators
        cut = len(self.data.cutlist)
        self.data.cutlist.append(cut)

        # Get sensitivities from subproblems (probability-weighted duals)
        sens_gen = {
            g: sum(self.data.scenarioprobs[s] * self.submodels[s].constraints.fixed_da[g].pi for s in self.data.scenarios)
            for g in gens}
        self.data.lambdas[cut] = sens_gen
        sens_load = sum(self.data.scenarioprobs[s] * self.submodels[s].constraints.fixed_load_da.pi for s in self.data.scenarios)

        # Get expected subproblem objective
        z_sub = sum(self.data.scenarioprobs[s] * self.submodels[s].model.ObjVal for s in self.data.scenarios)

        # Generate cut
        self.constraints.cuts[cut] = self.model.addConstr(
            self.variables.alpha,
            gb.GRB.GREATER_EQUAL,
            z_sub +
            gb.quicksum(sens_gen[g] * self.variables.gprod_da[g] for g in gens) -
            sum(sens_gen[g] * self.variables.gprod_da[g].x for g in gens) +
            sens_load * (self.variables.load_da - self.variables.load_da.x)
        )

    def _clear_cuts(self):
        """Remove all cuts from the model and reset bound bookkeeping."""
        self.data.cutlist = []
        self.data.lambdas = {}
        self.model.update()
        for con in self.constraints.cuts.values():
            self.model.remove(con)
        self.constraints.cuts = {}
        self.data.ub = gb.GRB.INFINITY
        self.data.lb = -gb.GRB.INFINITY
        self.data.upper_bounds = []
        self.data.lower_bounds = []

    ###
    # Update upper and lower bounds for Benders' iterations
    ###
    def _update_bounds(self):
        z_sub = sum(self.data.scenarioprobs[s] * self.submodels[s].model.ObjVal for s in self.data.scenarios)
        z_master = self.model.ObjVal

        # The best upper bound is the best incumbent with
        # alpha replaced by the sub problems' actual cost
        self.data.ub = z_master - self.variables.alpha.x + z_sub

        # The best lower bound is the current bestbound,
        # This will equal z_master at optimality
        try:
            self.data.lb = self.model.ObjBound
        except gb.GurobiError:
            # ObjBound may be unavailable for some model types; fall back
            # to the objective value.
            self.data.lb = self.model.ObjVal

        self.data.upper_bounds.append(self.data.ub)
        self.data.lower_bounds.append(self.data.lb)
        self.data.mipgap.append(self.model.params.IntFeasTol)
        self.data.solvetime.append(self.model.Runtime)

    def _save_vars(self):
        """Record the incumbent value of the proxy variable alpha."""
        self.data.alphas.append(self.variables.alpha.x)

    def _start_from_previous(self):
        '''
        Used to warm-start MIP problems.
        '''
        pass
| [
"pandas.read_csv",
"benders_stochastic_subproblem.Benders_Subproblem",
"gurobipy.Model",
"gurobipy.quicksum",
"numpy.random.normal"
] | [((4492, 4578), 'pandas.read_csv', 'pd.read_csv', (['"""benders_stochastic_gens.csv"""'], {'index_col': '"""gen"""', 'skipinitialspace': '(True)'}), "('benders_stochastic_gens.csv', index_col='gen',\n skipinitialspace=True)\n", (4503, 4578), True, 'import pandas as pd\n'), ((5319, 5329), 'gurobipy.Model', 'gb.Model', ([], {}), '()\n', (5327, 5329), True, 'import gurobipy as gb\n'), ((6494, 6547), 'gurobipy.quicksum', 'gb.quicksum', (['(self.variables.gprod_da[g] for g in gens)'], {}), '(self.variables.gprod_da[g] for g in gens)\n', (6505, 6547), True, 'import gurobipy as gb\n'), ((2397, 2433), 'benders_stochastic_subproblem.Benders_Subproblem', 'Benders_Subproblem', (['self'], {'scenario': 's'}), '(self, scenario=s)\n', (2415, 2433), False, 'from benders_stochastic_subproblem import Benders_Subproblem\n'), ((4890, 4990), 'numpy.random.normal', 'np.random.normal', (['self.params.demand_avg', 'self.params.demand_std'], {'size': 'self.params.numscenarios'}), '(self.params.demand_avg, self.params.demand_std, size=self.\n params.numscenarios)\n', (4906, 4990), True, 'import numpy as np\n'), ((6134, 6206), 'gurobipy.quicksum', 'gb.quicksum', (['(geninfo.price[g] * self.variables.gprod_da[g] for g in gens)'], {}), '(geninfo.price[g] * self.variables.gprod_da[g] for g in gens)\n', (6145, 6206), True, 'import gurobipy as gb\n'), ((7537, 7604), 'gurobipy.quicksum', 'gb.quicksum', (['(sens_gen[g] * self.variables.gprod_da[g] for g in gens)'], {}), '(sens_gen[g] * self.variables.gprod_da[g] for g in gens)\n', (7548, 7604), True, 'import gurobipy as gb\n')] |
from __future__ import annotations
from typing import Union
import numpy as np
def fix_nodata(
    arr: np.ndarray,
    nodata: Union[np.int32, np.int64, np.float32, np.float64]
) -> np.ndarray:
    """Replace values at or below the nodata sentinel (plus one) with NaN.

    The array is modified in place and also returned for convenience.

    Parameters:
        arr: data array to fix
        nodata: value used to represent nodata

    Returns:
        the same array, with NaN imposed
    """
    near_nodata = arr <= nodata + 1
    arr[near_nodata] = np.nan
    return arr
def is_all_nan(arr: np.ndarray) -> bool:
    """Test whether all array elements are nan.

    Parameters:
        arr: array to test

    Returns:
        True if every element is NaN (vacuously True for an empty array),
        False otherwise.
    """
    # bool() converts numpy.bool_ to a plain Python bool, matching the
    # declared return type; the redundant if/else-return-literal is gone.
    return bool(np.isnan(arr).all())
def nan_shape(shape: tuple[int, ...]) -> np.ndarray:
    """Create an array of nan values and given shape.

    Parameters:
        shape: array shape

    Returns:
        array of nans (dtype float64)
    """
    return np.full(shape, np.nan)
| [
"numpy.empty",
"numpy.isnan"
] | [((908, 923), 'numpy.empty', 'np.empty', (['shape'], {}), '(shape)\n', (916, 923), True, 'import numpy as np\n'), ((625, 638), 'numpy.isnan', 'np.isnan', (['arr'], {}), '(arr)\n', (633, 638), True, 'import numpy as np\n')] |
"""ImageNet data loader."""
import os
import numpy as np
from scipy import misc
from collections import OrderedDict
import theano
from athenet.utils import get_bin_path, get_data_path
from athenet.data_loader import DataLoader, Buffer
class ImageNetDataLoader(DataLoader):
    """ImageNet data loader.

    Loads training images from per-class subdirectories and validation
    images listed in a '<val_name>.txt' index file. Optionally attaches
    reversed copies of the images to both data sets.
    """
    name_prefix = 'ILSVRC'
    train_suffix = '_img_train'
    val_suffix = '_img_val'
    mean_rgb = [123, 117, 104]
    verbosity = 0

    def __init__(self, year, image_shape, buffer_size=1, train_data=True,
                 val_data=True, val_size=None, reverse_training=True,
                 reverse_validation=True):
        """Create ImageNet data loader.

        :param year: Specifies which year's data should be loaded.
        :param image_shape: Image shape in format (height, width).
        :param buffer_size: Number of batches to be stored in memory.
        :param train_data: Specifies whether to load training data.
        :param val_data: Specifies whether to load validation data.
        :param val_size: Maximal size of validation data. If None, then all
                         validation data will be used. Otherwise, val_size
                         images will be chosen randomly from the whole set.
        :param reverse_training: When set on True, reversed copies of images
                                 will be attached to the training data.
        :param reverse_validation: When set on True, reversed copies of
                                   images will be attached to the
                                   validation data.
        """
        super(ImageNetDataLoader, self).__init__()
        self.buffer_size = buffer_size
        self.shuffle_train_data = True
        self._height, self._width = image_shape

        base_name = self.name_prefix + str(year)
        self.train_name = base_name + self.train_suffix
        self.val_name = base_name + self.val_suffix

        if train_data:
            # Each subdirectory of the training folder is one class; the
            # running index is the answer for every image inside it.
            index = 0
            answers = []
            train_files = []
            train_dirs = os.listdir(get_bin_path(self.train_name))
            for d in train_dirs:
                path = os.path.join(self.train_name, d)
                files = os.listdir(get_bin_path(path))
                train_files += [(os.path.join(d, f), False) for f in files]
                answers += [index for i in range(len(files))]
                if reverse_training:
                    # Reversed copies share the class label of the originals.
                    train_files += [(os.path.join(d, f), True) for f in files]
                    answers += [index for i in range(len(files))]
                index += 1
            self.train_files = np.asarray(train_files)
            self.train_answers = np.asarray(answers)
            self._train_in = Buffer(self)
            self._train_out = theano.shared(self.train_answers, borrow=True)
            self.train_data_available = True
            self.train_set_size = len(answers)

        if val_data:
            # The validation index file maps 'filename answer' per line.
            # NOTE(review): the file is opened in 'rb' and split with a str
            # separator, which assumes Python 2 byte/str semantics - confirm
            # before running under Python 3.
            answers = OrderedDict()
            with open(get_data_path(self.val_name + '.txt'), 'rb') as f:
                while True:
                    line = f.readline()
                    if not line:
                        break
                    filename, answer = line.rsplit(' ', 1)
                    answers[filename] = np.array(int(answer), dtype="int32")

            val_files = [(filename, False) for filename in answers.keys()]
            # list() keeps '*= 2' below valid whether values() returns a
            # list (Python 2) or a view (Python 3).
            val_answers = list(answers.values())
            if reverse_validation:
                # BUG FIX: append the reversed copies to the originals.
                # Previously '=' replaced val_files, so only the reversed
                # images survived while val_answers was still doubled,
                # leaving files and answers inconsistent.
                val_files += [(filename, True) for filename in answers.keys()]
                val_answers *= 2
            val_answers = np.asarray(val_answers)
            self.val_files = np.asarray(val_files)
            self.val_set_size = len(self.val_files)

            # Reduce amount of validation data, if necessary
            if val_size and val_size < self.val_set_size:
                ind = np.random.permutation(self.val_set_size)[:val_size]
                self.val_files = self.val_files[ind]
                val_answers = val_answers[ind]
                self.val_set_size = val_size

            self._val_in = Buffer(self)
            self._val_out = theano.shared(val_answers, borrow=True)
            self.val_data_available = True

        self.batch_size = 1

    def _get_img(self, filename, reverse):
        """Load one image as a (1, 3, height, width) float array; optionally
        return a mirrored copy."""
        img = misc.imread(get_bin_path(filename))
        img = np.rollaxis(img, 2)
        img = img.reshape((1, 3, self._height, self._width))
        result = np.asarray(img, dtype=theano.config.floatX)
        if reverse:
            # Mirror along the last (width) axis.
            return result[..., ::-1]
        return result

    def _load_imgs(self, dir_name, files):
        """Load a batch of images and subtract the per-channel RGB mean."""
        imgs = []
        for filename, reverse in files:
            img = self._get_img(os.path.join(dir_name, filename), reverse)
            r, g, b = np.split(img, 3, axis=1)
            r -= self.mean_rgb[0]
            g -= self.mean_rgb[1]
            b -= self.mean_rgb[2]
            img = np.concatenate([r, g, b], axis=1)
            imgs += [img]
        return np.asarray(np.concatenate(imgs, axis=0),
                          dtype=theano.config.floatX)

    def load_val_data(self, batch_index):
        """Load validation images for the given batch into the buffer."""
        if self._val_in.contains(batch_index):
            return
        files = self._get_subset(self.val_files, batch_index, self.buffer_size)
        imgs = self._load_imgs(self.val_name, files)
        self._set_subset(self._val_in, imgs, batch_index, self.buffer_size)

    def val_input(self, batch_index):
        return self._get_subset(self._val_in, batch_index)

    def val_output(self, batch_index):
        return self._get_subset(self._val_out, batch_index)

    def load_train_data(self, batch_index):
        """Load training images for the given batch into the buffer,
        reshuffling the whole training set at the start of each epoch."""
        if self._train_in.contains(batch_index):
            return

        # Shuffle images when starting new epoch
        if batch_index == 0 and self.shuffle_train_data:
            ind = np.random.permutation(self.train_set_size)
            self.train_files = self.train_files[ind]
            self.train_answers = self.train_answers[ind]
            self._train_out.set_value(self.train_answers, borrow=True)

        files = self._get_subset(self.train_files, batch_index,
                                 self.buffer_size)
        imgs = self._load_imgs(self.train_name, files)
        self._set_subset(self._train_in, imgs, batch_index, self.buffer_size)

    def train_input(self, batch_index):
        return self._get_subset(self._train_in, batch_index)

    def train_output(self, batch_index):
        return self._get_subset(self._train_out, batch_index)
class AlexNetImageNetDataLoader(ImageNetDataLoader):
    """ImageNet data loader for AlexNet."""
    def __init__(self, year=2012, image_shape=(227, 227), buffer_size=1,
                 train_data=False, val_data=True, val_size=None,
                 reverse_training=True, reverse_validation=True):
        # Point the loader at the AlexNet-specific validation images, then
        # defer all remaining setup to the generic loader.
        self.val_suffix = '_img_val_alexnet'
        super(AlexNetImageNetDataLoader, self).__init__(
            year=year, image_shape=image_shape, buffer_size=buffer_size,
            train_data=train_data, val_data=val_data, val_size=val_size,
            reverse_training=reverse_training,
            reverse_validation=reverse_validation)
class GoogleNetImageNetDataLoader(ImageNetDataLoader):
    """ImageNet data loader for GoogleNet."""
    def __init__(self, year=2012, image_shape=(224, 224), buffer_size=1,
                 train_data=False, val_data=True, val_size=None,
                 reverse_training=True, reverse_validation=True):
        # Point the loader at the GoogleNet-specific validation images, then
        # defer all remaining setup to the generic loader.
        self.val_suffix = '_img_val_googlenet'
        super(GoogleNetImageNetDataLoader, self).__init__(
            year=year, image_shape=image_shape, buffer_size=buffer_size,
            train_data=train_data, val_data=val_data, val_size=val_size,
            reverse_training=reverse_training,
            reverse_validation=reverse_validation)
| [
"numpy.asarray",
"athenet.data_loader.Buffer",
"athenet.utils.get_data_path",
"numpy.split",
"theano.shared",
"athenet.utils.get_bin_path",
"numpy.random.permutation",
"numpy.rollaxis",
"collections.OrderedDict",
"os.path.join",
"numpy.concatenate"
] | [((4148, 4167), 'numpy.rollaxis', 'np.rollaxis', (['img', '(2)'], {}), '(img, 2)\n', (4159, 4167), True, 'import numpy as np\n'), ((4246, 4289), 'numpy.asarray', 'np.asarray', (['img'], {'dtype': 'theano.config.floatX'}), '(img, dtype=theano.config.floatX)\n', (4256, 4289), True, 'import numpy as np\n'), ((2416, 2439), 'numpy.asarray', 'np.asarray', (['train_files'], {}), '(train_files)\n', (2426, 2439), True, 'import numpy as np\n'), ((2473, 2492), 'numpy.asarray', 'np.asarray', (['answers'], {}), '(answers)\n', (2483, 2492), True, 'import numpy as np\n'), ((2523, 2535), 'athenet.data_loader.Buffer', 'Buffer', (['self'], {}), '(self)\n', (2529, 2535), False, 'from athenet.data_loader import DataLoader, Buffer\n'), ((2566, 2612), 'theano.shared', 'theano.shared', (['self.train_answers'], {'borrow': '(True)'}), '(self.train_answers, borrow=True)\n', (2579, 2612), False, 'import theano\n'), ((2749, 2762), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2760, 2762), False, 'from collections import OrderedDict\n'), ((3393, 3416), 'numpy.asarray', 'np.asarray', (['val_answers'], {}), '(val_answers)\n', (3403, 3416), True, 'import numpy as np\n'), ((3446, 3467), 'numpy.asarray', 'np.asarray', (['val_files'], {}), '(val_files)\n', (3456, 3467), True, 'import numpy as np\n'), ((3887, 3899), 'athenet.data_loader.Buffer', 'Buffer', (['self'], {}), '(self)\n', (3893, 3899), False, 'from athenet.data_loader import DataLoader, Buffer\n'), ((3928, 3967), 'theano.shared', 'theano.shared', (['val_answers'], {'borrow': '(True)'}), '(val_answers, borrow=True)\n', (3941, 3967), False, 'import theano\n'), ((4110, 4132), 'athenet.utils.get_bin_path', 'get_bin_path', (['filename'], {}), '(filename)\n', (4122, 4132), False, 'from athenet.utils import get_bin_path, get_data_path\n'), ((4568, 4592), 'numpy.split', 'np.split', (['img', '(3)'], {'axis': '(1)'}), '(img, 3, axis=1)\n', (4576, 4592), True, 'import numpy as np\n'), ((4713, 4746), 'numpy.concatenate', 
'np.concatenate', (['[r, g, b]'], {'axis': '(1)'}), '([r, g, b], axis=1)\n', (4727, 4746), True, 'import numpy as np\n'), ((4799, 4827), 'numpy.concatenate', 'np.concatenate', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (4813, 4827), True, 'import numpy as np\n'), ((5638, 5680), 'numpy.random.permutation', 'np.random.permutation', (['self.train_set_size'], {}), '(self.train_set_size)\n', (5659, 5680), True, 'import numpy as np\n'), ((1863, 1892), 'athenet.utils.get_bin_path', 'get_bin_path', (['self.train_name'], {}), '(self.train_name)\n', (1875, 1892), False, 'from athenet.utils import get_bin_path, get_data_path\n'), ((1950, 1982), 'os.path.join', 'os.path.join', (['self.train_name', 'd'], {}), '(self.train_name, d)\n', (1962, 1982), False, 'import os\n'), ((4503, 4535), 'os.path.join', 'os.path.join', (['dir_name', 'filename'], {}), '(dir_name, filename)\n', (4515, 4535), False, 'import os\n'), ((2018, 2036), 'athenet.utils.get_bin_path', 'get_bin_path', (['path'], {}), '(path)\n', (2030, 2036), False, 'from athenet.utils import get_bin_path, get_data_path\n'), ((2785, 2822), 'athenet.utils.get_data_path', 'get_data_path', (["(self.val_name + '.txt')"], {}), "(self.val_name + '.txt')\n", (2798, 2822), False, 'from athenet.utils import get_bin_path, get_data_path\n'), ((3662, 3702), 'numpy.random.permutation', 'np.random.permutation', (['self.val_set_size'], {}), '(self.val_set_size)\n', (3683, 3702), True, 'import numpy as np\n'), ((2071, 2089), 'os.path.join', 'os.path.join', (['d', 'f'], {}), '(d, f)\n', (2083, 2089), False, 'import os\n'), ((2250, 2268), 'os.path.join', 'os.path.join', (['d', 'f'], {}), '(d, f)\n', (2262, 2268), False, 'import os\n')] |
#!/usr/bin/env python
import healsparse
import healpy as hp
import numpy as np
import redmapper
import esutil
# HEALPix resolution of the output mask (nside) and of its coverage map.
nside = 512
nsideCoverage = 32
# Read the test galaxy catalog; ra/dec are used in degrees below.
gals = redmapper.GalaxyCatalog.from_fits_file('redmagic_test_input_gals.fit')
# Convert (ra, dec) to HEALPix spherical coordinates: theta is the
# colatitude in radians, phi the longitude in radians.
theta = np.radians(90.0 - gals.dec)
phi = np.radians(gals.ra)
# Nest-ordered pixel index for every galaxy.
ipnest = hp.ang2pix(nside, theta, phi, nest=True)
# Per-pixel galaxy counts over the full range of pixel indices.
hist = esutil.stat.histogram(ipnest, min=0, max=hp.nside2npix(nside))
# Pixels containing at least one galaxy.
gdPix, = np.where(hist > 0)
# Build a sparse map flagging the occupied pixels with 1.0 and write it out.
sparseMap = healsparse.HealSparseMap.makeEmpty(nsideCoverage, nside, dtype=np.float32)
sparseMap.updateValues(gdPix, np.ones(gdPix.size, dtype=np.float32))
sparseMap.write('redmagic_test_mask_hs.fit')
| [
"numpy.radians",
"healsparse.HealSparseMap.makeEmpty",
"numpy.ones",
"redmapper.GalaxyCatalog.from_fits_file",
"healpy.nside2npix",
"numpy.where",
"healpy.ang2pix"
] | [((151, 221), 'redmapper.GalaxyCatalog.from_fits_file', 'redmapper.GalaxyCatalog.from_fits_file', (['"""redmagic_test_input_gals.fit"""'], {}), "('redmagic_test_input_gals.fit')\n", (189, 221), False, 'import redmapper\n'), ((231, 258), 'numpy.radians', 'np.radians', (['(90.0 - gals.dec)'], {}), '(90.0 - gals.dec)\n', (241, 258), True, 'import numpy as np\n'), ((265, 284), 'numpy.radians', 'np.radians', (['gals.ra'], {}), '(gals.ra)\n', (275, 284), True, 'import numpy as np\n'), ((295, 335), 'healpy.ang2pix', 'hp.ang2pix', (['nside', 'theta', 'phi'], {'nest': '(True)'}), '(nside, theta, phi, nest=True)\n', (305, 335), True, 'import healpy as hp\n'), ((417, 435), 'numpy.where', 'np.where', (['(hist > 0)'], {}), '(hist > 0)\n', (425, 435), True, 'import numpy as np\n'), ((449, 523), 'healsparse.HealSparseMap.makeEmpty', 'healsparse.HealSparseMap.makeEmpty', (['nsideCoverage', 'nside'], {'dtype': 'np.float32'}), '(nsideCoverage, nside, dtype=np.float32)\n', (483, 523), False, 'import healsparse\n'), ((554, 591), 'numpy.ones', 'np.ones', (['gdPix.size'], {'dtype': 'np.float32'}), '(gdPix.size, dtype=np.float32)\n', (561, 591), True, 'import numpy as np\n'), ((385, 405), 'healpy.nside2npix', 'hp.nside2npix', (['nside'], {}), '(nside)\n', (398, 405), True, 'import healpy as hp\n')] |
import os
import argparse
import cv2
import numpy as np
import face_blend_common as fbc
from face_landmark_detection import load_models_and_image, validate_params, display_image
def align_face(img, points, output_dim):
    """Normalize a face image and its landmarks to the requested output size.

    The input image is expected as a float image in [0, 1]; the aligned
    result is returned as an 8-bit image.
    """
    print('Aligning Image')
    aligned, _aligned_points = fbc.normalizeImagesAndLandmarks(output_dim, img, points)
    return np.uint8(aligned * 255)
def main(predictor_path, image_filename, output_dim, output_path, display=False):
    """Detect facial landmarks on an image, align the face, and save the result.

    The output file is written into output_path with '_aligned' appended to
    the input image's base name.
    """
    # Validation checks
    validate_params(predictor_path, image_filename, output_path)

    # Load models and image
    detector, landmark_model, img = load_models_and_image(predictor_path, image_filename)
    if display:
        display_image(img)

    # Detect landmarks
    landmarks = np.array(fbc.getLandmarks(detector, landmark_model, img))

    # Alignment works on a floating-point image in the range 0 to 1.
    img_align = align_face(np.float32(img) / 255.0, landmarks, output_dim)

    # Save as '<name>_aligned<ext>' inside the output directory.
    base, ext = os.path.splitext(os.path.basename(image_filename))
    output_filename = os.path.join(output_path, base + '_aligned' + ext)
    cv2.imwrite(output_filename, img_align)
    print('Output image saved to', output_filename)
    if display:
        display_image(img_align, title='Aligned Image')
if __name__ == '__main__':
    # Command-line entry point: parse arguments and run the alignment pipeline.
    parser = argparse.ArgumentParser()
    # Required paths: landmark predictor model and input image.
    parser.add_argument('-p', '--predictor', required=True, help='Predictor model')
    parser.add_argument('-i', '--image', required=True, help='Image filename')
    # Output directory defaults to a 'results' folder next to this script.
    parser.add_argument(
        '--output',
        default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'results'),
        help='Output directory name'
    )
    # Dimensions of the aligned output image, in pixels.
    parser.add_argument('--height', type=int, default=600, help='Output image height')
    parser.add_argument('--width', type=int, default=600, help='Output image width')
    parser.add_argument('--display', action='store_true', help='Display images')
    args = parser.parse_args()
    # output_dim is passed as (height, width).
    main(args.predictor, args.image, (args.height, args.width), args.output, args.display)
| [
"os.path.abspath",
"numpy.uint8",
"argparse.ArgumentParser",
"os.path.basename",
"face_landmark_detection.load_models_and_image",
"cv2.imwrite",
"numpy.float32",
"face_blend_common.getLandmarks",
"face_landmark_detection.validate_params",
"face_blend_common.normalizeImagesAndLandmarks",
"face_la... | [((273, 329), 'face_blend_common.normalizeImagesAndLandmarks', 'fbc.normalizeImagesAndLandmarks', (['output_dim', 'img', 'points'], {}), '(output_dim, img, points)\n', (304, 329), True, 'import face_blend_common as fbc\n'), ((345, 369), 'numpy.uint8', 'np.uint8', (['(img_norm * 255)'], {}), '(img_norm * 255)\n', (353, 369), True, 'import numpy as np\n'), ((502, 562), 'face_landmark_detection.validate_params', 'validate_params', (['predictor_path', 'image_filename', 'output_path'], {}), '(predictor_path, image_filename, output_path)\n', (517, 562), False, 'from face_landmark_detection import load_models_and_image, validate_params, display_image\n'), ((636, 689), 'face_landmark_detection.load_models_and_image', 'load_models_and_image', (['predictor_path', 'image_filename'], {}), '(predictor_path, image_filename)\n', (657, 689), False, 'from face_landmark_detection import load_models_and_image, validate_params, display_image\n'), ((1162, 1201), 'cv2.imwrite', 'cv2.imwrite', (['output_filename', 'img_align'], {}), '(output_filename, img_align)\n', (1173, 1201), False, 'import cv2\n'), ((1369, 1394), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1392, 1394), False, 'import argparse\n'), ((715, 733), 'face_landmark_detection.display_image', 'display_image', (['img'], {}), '(img)\n', (728, 733), False, 'from face_landmark_detection import load_models_and_image, validate_params, display_image\n'), ((780, 835), 'face_blend_common.getLandmarks', 'fbc.getLandmarks', (['face_detector', 'landmark_detector', 'img'], {}), '(face_detector, landmark_detector, img)\n', (796, 835), True, 'import face_blend_common as fbc\n'), ((906, 921), 'numpy.float32', 'np.float32', (['img'], {}), '(img)\n', (916, 921), True, 'import numpy as np\n'), ((1279, 1326), 'face_landmark_detection.display_image', 'display_image', (['img_align'], {'title': '"""Aligned Image"""'}), "(img_align, title='Aligned Image')\n", (1292, 1326), False, 'from 
face_landmark_detection import load_models_and_image, validate_params, display_image\n'), ((1117, 1149), 'os.path.basename', 'os.path.basename', (['image_filename'], {}), '(image_filename)\n', (1133, 1149), False, 'import os\n'), ((1648, 1673), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1663, 1673), False, 'import os\n')] |
# -*- coding: utf-8 -*-
# task_runner.py
"""
Run lightcurve processing tasks, as defined within a list of request objects.
"""
import logging
import os
import time
import numpy as np
from eas_batman_wrapper.batman_wrapper import BatmanWrapper
from eas_psls_wrapper.psls_wrapper import PslsWrapper
from .lc_reader_lcsg import read_lcsg_lightcurve
from .lightcurve import LightcurveArbitraryRaster
from .lightcurve_resample import LightcurveResampler
from .quality_control import quality_control
from .results_logger import ResultsToRabbitMQ
from .run_time_logger import RunTimesToRabbitMQ
from .task_timer import TaskTimer
from .tda_wrappers import bls_reference, bls_kovacs, dst_v26, dst_v29, exotrans, qats, tls
class TaskRunner:
    """
    Within a worker node, run a sequence of lightcurve processing tasks, as defined within a list of tasks.
    """

    def __init__(self, results_target="rabbitmq"):
        """
        Instantiate a task runner.

        :param results_target:
            Define where we send our results to. Either <rabbitmq> or <logging>.
        :type results_target:
            str
        """
        # Destination for results from this task runner. Either <rabbitmq> or <logging>
        self.results_target = results_target

        # List of all the lightcurves this task runner has written. Each a dictionary of <lc_filename> and
        # <lc_directory>
        self.lightcurves_written = []

        # In memory storage for lightcurve objects, by name
        self.lightcurves_in_memory = {}

        # Name of the job we are currently working on
        self.job_name = "untitled"

        # Parameters associated with the job we are currently working on
        self.job_parameters = {}

    def read_lightcurve(self, source):
        """
        Read an input lightcurve.

        :param source:
            A dictionary specifying the source of the lightcurve. It should contain the fields
            <source>, <filename> and <directory>.
        :type source:
            dict
        :return:
            Lightcurve object.
        """
        # Extract fields from input data structure
        lc_source = source.get('source', 'memory')
        assert lc_source in ('memory', 'archive', 'lcsg')
        lc_filename = source.get('filename', 'lightcurve.dat')
        lc_directory = source.get('directory', 'test_lightcurves')

        # Open connection to the run-times output message queue
        time_log = RunTimesToRabbitMQ(results_target=self.results_target)

        # Read input lightcurve
        if lc_source == 'memory':
            lc = self.lightcurves_in_memory[lc_directory][lc_filename]
        else:
            if lc_source == 'lcsg':
                lc_reader = read_lcsg_lightcurve
            elif lc_source == 'archive':
                lc_reader = LightcurveArbitraryRaster.from_file
            else:
                raise ValueError("Unknown lightcurve source <{}>".format(lc_source))

            # Load lightcurve, timing how long the read takes
            with TaskTimer(job_name=self.job_name, target_name=lc_filename, task_name='load_lc',
                           parameters=self.job_parameters, time_logger=time_log):
                lc = lc_reader(
                    filename=lc_filename,
                    directory=lc_directory
                )

        # Close connection to message queue
        time_log.close()

        # Return lightcurve object
        return lc

    def write_lightcurve(self, lightcurve, target):
        """
        Write an output lightcurve.

        :param lightcurve:
            The Lightcurve object to write out.
        :type lightcurve:
            LightcurveArbitraryRaster
        :param target:
            A dictionary specifying the destination for the lightcurve. It should contain the fields
            <source>, <filename> and <directory>.
        :type target:
            dict
        """
        # Extract fields from input data structure
        lc_target = target.get('source', 'memory')
        assert lc_target in ('memory', 'archive', 'lcsg')
        lc_filename = target.get('filename', 'lightcurve.dat')
        lc_directory = target.get('directory', 'test_lightcurves')

        # Open connection to the run-times output message queue
        time_log = RunTimesToRabbitMQ(results_target=self.results_target)

        # Write output. Any target other than <archive> (including <lcsg>) is kept in memory.
        if lc_target == "archive":
            with TaskTimer(job_name=self.job_name, target_name=lc_filename, task_name='write_lc',
                           parameters=self.job_parameters, time_logger=time_log):
                lightcurve.to_file(directory=lc_directory, filename=lc_filename)
                # Keep track of what we wrote to disk, so <delete_all_products> can clean up afterwards
                self.lightcurves_written.append({
                    'source': 'archive',
                    'filename': lc_filename,
                    'directory': lc_directory
                })
        else:
            if lc_directory not in self.lightcurves_in_memory:
                self.lightcurves_in_memory[lc_directory] = {}
            self.lightcurves_in_memory[lc_directory][lc_filename] = lightcurve

        # Close connection to message queue
        time_log.close()

    def delete_lightcurve(self, lc_source=None, job_name=None, source=None):
        """
        Delete a lightcurve.

        :param lc_source:
            A dictionary specifying the source for the input lightcurve. It should contain the fields
            <source>, <filename> and <directory>.
        :type lc_source:
            dict
        :param job_name:
            Optionally, the name of the job this task is part of. Accepted so that <do_work> can invoke
            this method with the same keywords as the other task methods.
        :type job_name:
            str
        :param source:
            Alias for <lc_source>; this is the keyword used by the task descriptions passed to <do_work>.
        :type source:
            dict
        """
        # Accept the lightcurve description under either keyword.
        # Bug fix: <do_work> previously called this method with keywords (job_name, source) which did not
        # match the old signature (lc_source only), raising a TypeError.
        if lc_source is None:
            lc_source = source
        assert lc_source is not None, "No lightcurve specified for deletion"
        if job_name is not None:
            self.job_name = job_name

        # Extract fields from input data structure
        location = lc_source.get('source', 'memory')
        assert location in ('memory', 'archive', 'lcsg')
        filename = lc_source.get('filename', 'lightcurve.dat')
        directory = lc_source.get('directory', 'test_lightcurves')

        # Delete lightcurve.
        # Bug fix: previously the whole <lc_source> dictionary was compared against the strings
        # 'memory' / 'archive', so neither branch could ever run, and the memory branch referenced
        # undefined local variables <lc_directory> / <lc_filename>.
        if location == 'memory':
            del self.lightcurves_in_memory[directory][filename]
        elif location == 'archive':
            # Full path for this lightcurve
            # NOTE(review): <settings> is not defined or imported in this module -- presumably a global
            # configuration dictionary from elsewhere in the project; confirm it is in scope at runtime.
            file_path = os.path.join(settings['lcPath'], directory, filename)
            if os.path.exists(file_path):
                os.unlink(file_path)

    def delete_all_products(self):
        """
        Delete all of the lightcurves we have generated.
        """
        for item in self.lightcurves_written:
            self.delete_lightcurve(lc_source=item)

    def psls_synthesise(self, job_name, target, specs):
        """
        Perform the task of synthesising a lightcurve using PSLS.

        :param job_name:
            Specify the name of the job that these tasks is part of.
        :type job_name:
            str
        :param target:
            A dictionary specifying the destination for the lightcurve. It should contain the fields
            <source>, <filename> and <directory>.
        :type target:
            dict
        :param specs:
            Specifications for the lightcurve we are to synthesise. The dictionary should define the following keys:
            <duration>, <enable_transits>, <planet_radius>, <orbital_period>, <semi_major_axis>, <orbital_angle>
        :type specs:
            dict
        """
        self.job_name = job_name

        # Human-readable identifier for this lightcurve, used in logs and results
        out_id = os.path.join(
            target.get('directory', 'test_lightcurves'),
            target.get('filename', 'lightcurve.dat')
        )

        logging.info("Running PSLS synthesis of <{}>".format(out_id))

        # Record start time
        start_time = time.time()

        # Open connections to transit results and run times to output message queues
        time_log = RunTimesToRabbitMQ(results_target=self.results_target)
        result_log = ResultsToRabbitMQ(results_target=self.results_target)

        # Do synthesis
        with TaskTimer(job_name=job_name, target_name=out_id, task_name='psls_synthesis',
                       parameters=self.job_parameters, time_logger=time_log):
            synthesiser = PslsWrapper()
            synthesiser.configure(**specs)
            lc_object = synthesiser.synthesise()
            synthesiser.close()

        # Write output
        self.write_lightcurve(lightcurve=lc_object, target=target)

        # Log LC metadata in results table
        result_log.record_result(job_name=job_name, target_name=out_id,
                                 task_name='psls_synthesis',
                                 parameters=self.job_parameters, timestamp=start_time,
                                 result=lc_object.metadata)

        # Close connection to message queue
        time_log.close()
        result_log.close()

    def batman_synthesise(self, job_name, target, specs):
        """
        Perform the task of synthesising a lightcurve using batman.

        :param job_name:
            Specify the name of the job that these tasks is part of.
        :type job_name:
            str
        :param target:
            A dictionary specifying the destination for the lightcurve. It should contain the fields
            <source>, <filename> and <directory>.
        :type target:
            dict
        :param specs:
            Specifications for the lightcurve we are to synthesise. The dictionary should define the following keys:
            <duration>, <enable_transits>, <planet_radius>, <orbital_period>, <semi_major_axis>, <orbital_angle>
        :type specs:
            dict
        """
        self.job_name = job_name

        # Human-readable identifier for this lightcurve, used in logs and results
        out_id = os.path.join(
            target.get('directory', 'test_lightcurves'),
            target.get('filename', 'lightcurve.dat')
        )

        logging.info("Running Batman synthesis of <{}>".format(out_id))

        # Record start time
        start_time = time.time()

        # Open connections to transit results and run times to output message queues
        time_log = RunTimesToRabbitMQ(results_target=self.results_target)
        result_log = ResultsToRabbitMQ(results_target=self.results_target)

        # Do synthesis
        with TaskTimer(job_name=job_name, target_name=out_id, task_name='batman_synthesis',
                       parameters=self.job_parameters, time_logger=time_log):
            synthesiser = BatmanWrapper()
            synthesiser.configure(**specs)
            lc_object = synthesiser.synthesise()
            synthesiser.close()

        # Write output
        self.write_lightcurve(lightcurve=lc_object, target=target)

        # Log LC metadata in results table
        result_log.record_result(job_name=job_name, target_name=out_id,
                                 task_name='batman_synthesis',
                                 parameters=self.job_parameters, timestamp=start_time,
                                 result=lc_object.metadata)

        # Close connection to message queue
        time_log.close()
        result_log.close()

    def lightcurves_multiply(self, job_name, input_1, input_2, output):
        """
        Perform the task of multiplying two lightcurves together.

        :param job_name:
            Specify the name of the job that these tasks is part of.
        :type job_name:
            str
        :param input_1:
            A dictionary specifying the source for the first lightcurve. It should contain the fields
            <source>, <filename> and <directory>.
        :type input_1:
            dict
        :param input_2:
            A dictionary specifying the source for the second lightcurve. It should contain the fields
            <source>, <filename> and <directory>.
        :type input_2:
            dict
        :param output:
            A dictionary specifying the destination for the lightcurve. It should contain the fields
            <source>, <filename> and <directory>.
        :type output:
            dict
        """
        self.job_name = job_name

        # Human-readable identifier for the output lightcurve
        out_id = os.path.join(
            output.get('directory', 'test_lightcurves'),
            output.get('filename', 'lightcurve.dat')
        )

        logging.info("Multiplying lightcurves")

        # Open connection to the run-times output message queue
        time_log = RunTimesToRabbitMQ(results_target=self.results_target)

        # Load lightcurve 1
        lc_1 = self.read_lightcurve(source=input_1)

        # Load lightcurve 2
        lc_2 = self.read_lightcurve(source=input_2)

        # Multiply lightcurves together
        with TaskTimer(job_name=job_name, target_name=out_id, task_name='multiplication',
                       parameters=self.job_parameters, time_logger=time_log):
            result = lc_1 * lc_2

        # Store result
        self.write_lightcurve(lightcurve=result, target=output)

        # Close connection to message queue
        time_log.close()

    def verify_lightcurve(self, job_name, source):
        """
        Perform the task of verifying a lightcurve.

        :param job_name:
            Specify the name of the job that these tasks is part of.
        :type job_name:
            str
        :param source:
            A dictionary specifying the source for the input lightcurve. It should contain the fields
            <source>, <filename> and <directory>.
        :type source:
            dict
        """
        self.job_name = job_name

        # Human-readable identifier for the input lightcurve
        input_id = os.path.join(
            source.get('directory', 'test_lightcurves'),
            source.get('filename', 'lightcurve.dat')
        )

        logging.info("Verifying <{input_id}>.".format(input_id=input_id))

        # Record start time
        start_time = time.time()

        # Open connections to transit results and run times to output message queues
        time_log = RunTimesToRabbitMQ(results_target=self.results_target)
        result_log = ResultsToRabbitMQ(results_target=self.results_target)

        # Read input lightcurve
        lc = self.read_lightcurve(source=source)

        # Verify lightcurve
        with TaskTimer(job_name=job_name, target_name=input_id, task_name='verify',
                       parameters=self.job_parameters, time_logger=time_log):
            output = {
                'time_min': np.min(lc.times),
                'time_max': np.max(lc.times),
                'flux_min': np.min(lc.fluxes),
                'flux_max': np.max(lc.fluxes)
            }
            logging.info("Lightcurve <{}> time span {:.1f} to {:.1f}".format(input_id,
                                                                              output['time_min'],
                                                                              output['time_max']))
            logging.info("Lightcurve <{}> flux range {:.6f} to {:.6f}".format(input_id,
                                                                               output['flux_min'],
                                                                               output['flux_max']))

            # Run first code for checking LCs
            error_count = lc.check_fixed_step(verbose=True, max_errors=4)

            if error_count == 0:
                logging.info("V1: Lightcurve <{}> has fixed step".format(input_id))
                output['v1'] = True
            else:
                logging.info("V1: Lightcurve <{}> doesn't have fixed step ({:d} errors)".format(input_id, error_count))
                output['v1'] = False

            # Run second code for checking LCs
            error_count = lc.check_fixed_step_v2(verbose=True, max_errors=4)

            if error_count == 0:
                logging.info("V2: Lightcurve <{}> has fixed step".format(input_id))
                output['v2'] = True
            else:
                logging.info("V2: Lightcurve <{}> doesn't have fixed step ({:d} errors)".format(input_id, error_count))
                output['v2'] = False

        # Log output to results table
        result_log.record_result(job_name=job_name, target_name=input_id,
                                 task_name='verify',
                                 parameters=self.job_parameters, timestamp=start_time,
                                 result=output)

        # Close connection to message queue
        time_log.close()
        result_log.close()

    def rebin_lightcurve(self, job_name, cadence, source, target):
        """
        Perform the task of re-binning a lightcurve.

        :param job_name:
            Specify the name of the job that these tasks is part of.
        :type job_name:
            str
        :param cadence:
            New time cadence for lightcurve, seconds.
        :type cadence:
            float
        :param source:
            A dictionary specifying the source for the input lightcurve. It should contain the fields
            <source>, <filename> and <directory>.
        :type source:
            dict
        :param target:
            A dictionary specifying the target for the output lightcurve. It should contain the fields
            <source>, <filename> and <directory>.
        :type target:
            dict
        """
        self.job_name = job_name

        # Human-readable identifier for the input lightcurve
        input_id = os.path.join(
            source.get('directory', 'test_lightcurves'),
            source.get('filename', 'lightcurve.dat')
        )

        logging.info("Rebinning <{input_id}>.".format(input_id=input_id))

        # Open connection to the run-times output message queue
        time_log = RunTimesToRabbitMQ(results_target=self.results_target)

        # Read input lightcurve
        lc = self.read_lightcurve(source=source)

        # Re-bin lightcurve
        with TaskTimer(job_name=job_name, target_name=input_id, task_name='binning',
                       parameters=self.job_parameters, time_logger=time_log):
            start_time = np.min(lc.times)
            end_time = np.max(lc.times)
            new_times = np.arange(start_time, end_time, cadence / 86400)  # Array of times (days)

            resampler = LightcurveResampler(input_lc=lc)
            new_lc = resampler.onto_raster(output_raster=new_times)

            # Eliminate nasty edge effects
            new_lc.fluxes[0] = 1
            new_lc.fluxes[-1] = 1

        # Write output
        self.write_lightcurve(lightcurve=new_lc, target=target)

        # Close connection to message queue
        time_log.close()

    def transit_search(self, job_name, lc_duration, tda_name, source, search_settings):
        """
        Perform the task of running a lightcurve through a transit-detection algorithm.

        :param job_name:
            Specify the name of the job that these tasks is part of.
        :type job_name:
            str
        :param lc_duration:
            The maximum length of lightcurve to use; truncate the lightcurve after this period of time (days).
        :type lc_duration:
            float
        :param tda_name:
            The name of the transit-detection code to use.
        :type tda_name:
            str
        :param source:
            A dictionary specifying the source for the input lightcurve. It should contain the fields
            <source>, <filename> and <directory>.
        :type source:
            dict
        :param search_settings:
            Dictionary of settings which control how we search for transits.
        :type search_settings:
            dict
        """
        self.job_name = job_name

        # Human-readable identifier for the input lightcurve
        input_id = os.path.join(
            source.get('directory', 'test_lightcurves'),
            source.get('filename', 'lightcurve.dat')
        )

        logging.info("Running <{input_id}> through <{tda_name}> with duration {lc_days:.1f}.".format(
            input_id=input_id,
            tda_name=tda_name,
            lc_days=lc_duration)
        )

        # Record start time
        start_time = time.time()

        # Open connections to transit results and run times to RabbitMQ message queues
        time_log = RunTimesToRabbitMQ(results_target=self.results_target)
        result_log = ResultsToRabbitMQ(results_target=self.results_target)

        # Read input lightcurve
        lc = self.read_lightcurve(source=source)

        # Process lightcurve with the requested transit-detection algorithm
        with TaskTimer(job_name=job_name, tda_code=tda_name, target_name=input_id, task_name='transit_detection',
                       parameters=self.job_parameters, time_logger=time_log):
            if tda_name == 'bls_reference':
                x = bls_reference.process_lightcurve(lc=lc, lc_duration=lc_duration, search_settings=search_settings)
            elif tda_name == 'bls_kovacs':
                x = bls_kovacs.process_lightcurve(lc=lc, lc_duration=lc_duration, search_settings=search_settings)
            elif tda_name == 'dst_v26':
                x = dst_v26.process_lightcurve(lc=lc, lc_duration=lc_duration, search_settings=search_settings)
            elif tda_name == 'dst_v29':
                x = dst_v29.process_lightcurve(lc=lc, lc_duration=lc_duration, search_settings=search_settings)
            elif tda_name == 'exotrans':
                x = exotrans.process_lightcurve(lc=lc, lc_duration=lc_duration, search_settings=search_settings)
            elif tda_name == 'qats':
                x = qats.process_lightcurve(lc=lc, lc_duration=lc_duration, search_settings=search_settings)
            elif tda_name == 'tls':
                x = tls.process_lightcurve(lc=lc, lc_duration=lc_duration, search_settings=search_settings)
            else:
                assert False, "Unknown transit-detection code <{}>".format(tda_name)

        # Extract output
        output, output_extended = x

        # Test whether transit-detection was successful
        quality_control(lc=lc, metadata=output)

        # Add additional metadata to results
        # (duplicate 'pixels_in_transit' entry removed from this list)
        for item in ['integrated_transit_power', 'pixels_in_transit', 'mes']:
            output[item] = lc.metadata.get(item, None)

        # Send result to message queue
        result_log.record_result(job_name=job_name, tda_code=tda_name, target_name=input_id,
                                 task_name='transit_detection',
                                 parameters=self.job_parameters, timestamp=start_time,
                                 result=output, result_extended=output_extended)

        # Close connection to message queue
        time_log.close()
        result_log.close()

    def do_work(self, task_list, job_name="not set", job_parameters=None, clean_up_products=False):
        """
        Perform a list of tasks sent to us via a list of request structures

        :param job_name:
            Optionally, specify the name of the job that these tasks are part of. If the "job_name" field is specified
            in the tasks, this overrides the job name specified here.
        :type job_name:
            str
        :param job_parameters:
            Parameter values associated with this job.
        :type job_parameters:
            dict
        :param task_list:
            A list of dictionaries describing the tasks we are to perform, in sequence. Each task is assumed to depend
            on the previous tasks, and so they are not run in parallel.
        :type task_list:
            List
        :param clean_up_products:
            Boolean flag indicating whether we should delete any data files we write to disk
        :type clean_up_products:
            bool
        """
        # Check that task list is a list
        assert isinstance(task_list, list)

        # Bug fix: avoid a mutable default argument ({}), which would be shared between calls
        if job_parameters is None:
            job_parameters = {}

        # Record job's parameter values
        self.job_parameters = job_parameters

        # Do each task in list
        for job_description in task_list:
            # Check that task description is a dictionary
            assert isinstance(job_description, dict)

            # Null task
            if job_description['task'] == 'null':
                logging.info("Running null task")

            # Error task
            elif job_description['task'] == 'error':
                logging.info("Running error task")
                assert False, "Running error task"

            # Transit search
            elif job_description['task'] == 'transit_search':
                self.transit_search(
                    job_name=job_description.get('job_name', job_name),
                    source=job_description['source'],
                    lc_duration=float(job_description.get('lc_duration', 730)),
                    tda_name=job_description.get('tda_name', 'tls'),
                    search_settings=job_description.get('search_settings', {})
                )

            # Synthesise lightcurve with PSLS
            elif job_description['task'] == 'psls_synthesise':
                self.psls_synthesise(
                    job_name=job_description.get('job_name', job_name),
                    target=job_description['target'],
                    specs=job_description.get('specs', {})
                )

            # Synthesise lightcurve with Batman
            elif job_description['task'] == 'batman_synthesise':
                self.batman_synthesise(
                    job_name=job_description.get('job_name', job_name),
                    target=job_description['target'],
                    specs=job_description.get('specs', {})
                )

            # Multiply two lightcurves together
            elif job_description['task'] == 'multiplication':
                self.lightcurves_multiply(
                    job_name=job_description.get('job_name', job_name),
                    input_1=job_description['input_1'],
                    input_2=job_description['input_2'],
                    output=job_description['output'],
                )

            # Verify lightcurve
            elif job_description['task'] == 'verify':
                self.verify_lightcurve(
                    job_name=job_description.get('job_name', job_name),
                    source=job_description['source'],
                )

            # Delete lightcurve
            elif job_description['task'] == 'delete':
                self.delete_lightcurve(
                    job_name=job_description.get('job_name', job_name),
                    source=job_description['source'],
                )

            # Re-bin lightcurve
            elif job_description['task'] == 'binning':
                self.rebin_lightcurve(
                    job_name=job_description.get('job_name', job_name),
                    source=job_description['source'],
                    target=job_description['target'],
                    cadence=job_description.get('cadence', 25)
                )

            # Unknown task
            else:
                raise ValueError("Unknown task <{}>".format(job_description['task']))

        # Clean up products
        if clean_up_products:
            self.delete_all_products()
| [
"os.unlink",
"os.path.exists",
"time.time",
"logging.info",
"numpy.min",
"eas_batman_wrapper.batman_wrapper.BatmanWrapper",
"numpy.max",
"numpy.arange",
"eas_psls_wrapper.psls_wrapper.PslsWrapper",
"os.path.join"
] | [((7383, 7394), 'time.time', 'time.time', ([], {}), '()\n', (7392, 7394), False, 'import time\n'), ((9594, 9605), 'time.time', 'time.time', ([], {}), '()\n', (9603, 9605), False, 'import time\n'), ((11852, 11891), 'logging.info', 'logging.info', (['"""Multiplying lightcurves"""'], {}), "('Multiplying lightcurves')\n", (11864, 11891), False, 'import logging\n'), ((13402, 13413), 'time.time', 'time.time', ([], {}), '()\n', (13411, 13413), False, 'import time\n'), ((19572, 19583), 'time.time', 'time.time', ([], {}), '()\n', (19581, 19583), False, 'import time\n'), ((7848, 7861), 'eas_psls_wrapper.psls_wrapper.PslsWrapper', 'PslsWrapper', ([], {}), '()\n', (7859, 7861), False, 'from eas_psls_wrapper.psls_wrapper import PslsWrapper\n'), ((10061, 10076), 'eas_batman_wrapper.batman_wrapper.BatmanWrapper', 'BatmanWrapper', ([], {}), '()\n', (10074, 10076), False, 'from eas_batman_wrapper.batman_wrapper import BatmanWrapper\n'), ((17563, 17579), 'numpy.min', 'np.min', (['lc.times'], {}), '(lc.times)\n', (17569, 17579), True, 'import numpy as np\n'), ((17603, 17619), 'numpy.max', 'np.max', (['lc.times'], {}), '(lc.times)\n', (17609, 17619), True, 'import numpy as np\n'), ((17644, 17692), 'numpy.arange', 'np.arange', (['start_time', 'end_time', '(cadence / 86400)'], {}), '(start_time, end_time, cadence / 86400)\n', (17653, 17692), True, 'import numpy as np\n'), ((5944, 5997), 'os.path.join', 'os.path.join', (["settings['lcPath']", 'directory', 'filename'], {}), "(settings['lcPath'], directory, filename)\n", (5956, 5997), False, 'import os\n'), ((6014, 6039), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (6028, 6039), False, 'import os\n'), ((13973, 13989), 'numpy.min', 'np.min', (['lc.times'], {}), '(lc.times)\n', (13979, 13989), True, 'import numpy as np\n'), ((14019, 14035), 'numpy.max', 'np.max', (['lc.times'], {}), '(lc.times)\n', (14025, 14035), True, 'import numpy as np\n'), ((14065, 14082), 'numpy.min', 'np.min', (['lc.fluxes'], {}), 
'(lc.fluxes)\n', (14071, 14082), True, 'import numpy as np\n'), ((14112, 14129), 'numpy.max', 'np.max', (['lc.fluxes'], {}), '(lc.fluxes)\n', (14118, 14129), True, 'import numpy as np\n'), ((23603, 23636), 'logging.info', 'logging.info', (['"""Running null task"""'], {}), "('Running null task')\n", (23615, 23636), False, 'import logging\n'), ((6057, 6077), 'os.unlink', 'os.unlink', (['file_path'], {}), '(file_path)\n', (6066, 6077), False, 'import os\n'), ((23732, 23766), 'logging.info', 'logging.info', (['"""Running error task"""'], {}), "('Running error task')\n", (23744, 23766), False, 'import logging\n')] |
# -*- coding: UTF-8 -*-
"""
Copyright 2021 Tianshu AI Platform. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=============================================================
"""
import numpy as np
from tsne import bh_sne
from scipy import linalg as la
class projector_reduction:
    """Dimensionality reduction (PCA or Barnes-Hut t-SNE) for batches of sample vectors."""

    def __init__(self, data, method, dimension=None):
        """
        :param data: array-like samples; each sample is flattened to a 1-D feature vector
        :param method: reduction method, either 'pca' or 'tsne'
        :param dimension: target dimensionality (defaults to 2)
        """
        self.method = method
        self.dimension = 2 if dimension is None else dimension
        self.data = self.data_preprocess(data)

    def data_preprocess(self, data):
        """Flatten samples to shape (n_samples, n_features); pad 2-D data with a zero
        column when a 3-D embedding is requested."""
        data = np.array(data)
        data = data.reshape(data.shape[0], -1)
        if data.shape[1] == 2 and self.dimension == 3:
            pad = np.zeros(shape=(data.shape[0], 1), dtype=data.dtype)
            data = np.hstack((data, pad))
        return data

    def Pca(self):
        """Project the data onto its first <dimension> principal components."""
        assert self.dimension <= self.data.shape[1]
        # do PCA
        # Bug fix: centre a *copy* of the data -- the previous in-place subtraction
        # (data -= data.mean(axis=0)) silently modified self.data as a side effect.
        data = self.data - self.data.mean(axis=0)
        # working with covariance + (svd on cov.) is
        # much faster than svd on data directly.
        cov = np.dot(data.T, data) / data.shape[0]
        u, s, v = la.svd(cov, full_matrices=False)
        u = u[:, 0:self.dimension]
        return np.dot(data, u).tolist()

    def Tsne(self):
        """Embed the data with Barnes-Hut t-SNE; the target dimension must be 2 or 3."""
        if self.dimension > 3:
            raise ValueError('The dimension of the tsne method must be 2 or 3')
        _data = np.array(self.data)
        seed = np.random.RandomState(0)
        # Perplexity must be smaller than the number of samples
        perplexity = _data.shape[0] // 4 if _data.shape[0] < 100 else 25
        data = bh_sne(_data, max_iter=50, pca_d=_data.shape[1], d=self.dimension, perplexity=perplexity, random_state=seed)
        return data.tolist()

    def get_data(self):
        """Run the configured reduction and return the embedded points as nested lists.

        Returns None for an unrecognised method (preserved for backward compatibility)."""
        if self.method == 'pca':
            return self.Pca()
        elif self.method == 'tsne':
            return self.Tsne()
        else:
            return
| [
"tsne.bh_sne",
"numpy.zeros",
"numpy.random.RandomState",
"numpy.hstack",
"scipy.linalg.svd",
"numpy.array",
"numpy.dot"
] | [((1034, 1048), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (1042, 1048), True, 'import numpy as np\n'), ((1603, 1635), 'scipy.linalg.svd', 'la.svd', (['cov'], {'full_matrices': '(False)'}), '(cov, full_matrices=False)\n', (1609, 1635), True, 'from scipy import linalg as la\n'), ((1859, 1878), 'numpy.array', 'np.array', (['self.data'], {}), '(self.data)\n', (1867, 1878), True, 'import numpy as np\n'), ((1894, 1918), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (1915, 1918), True, 'import numpy as np\n'), ((2008, 2120), 'tsne.bh_sne', 'bh_sne', (['_data'], {'max_iter': '(50)', 'pca_d': '_data.shape[1]', 'd': 'self.dimension', 'perplexity': 'perplexity', 'random_state': 'seed'}), '(_data, max_iter=50, pca_d=_data.shape[1], d=self.dimension,\n perplexity=perplexity, random_state=seed)\n', (2014, 2120), False, 'from tsne import bh_sne\n'), ((1167, 1219), 'numpy.zeros', 'np.zeros', ([], {'shape': '(data.shape[0], 1)', 'dtype': 'data.dtype'}), '(shape=(data.shape[0], 1), dtype=data.dtype)\n', (1175, 1219), True, 'import numpy as np\n'), ((1239, 1261), 'numpy.hstack', 'np.hstack', (['(data, pad)'], {}), '((data, pad))\n', (1248, 1261), True, 'import numpy as np\n'), ((1548, 1568), 'numpy.dot', 'np.dot', (['data.T', 'data'], {}), '(data.T, data)\n', (1554, 1568), True, 'import numpy as np\n'), ((1686, 1701), 'numpy.dot', 'np.dot', (['data', 'u'], {}), '(data, u)\n', (1692, 1701), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright INRIA
# Contributors: <NAME> (<EMAIL>)
# <NAME> (<EMAIL>)
#
# This software is governed by the CeCILL license under French law and abiding
# by the rules of distribution of free software. You can use, modify and/ or
# redistribute the software under the terms of the CeCILL license as circulated
# by CEA, CNRS and INRIA at the following URL
# http://www.cecill.info/index.en.html.
#
# As a counterpart to the access to the source code and rights to copy, modify
# and redistribute granted by the license, users are provided only with a
# limited warranty and the software's author, the holder of the economic
# rights, and the successive licensors have only limited liability.
#
# In this respect, the user's attention is drawn to the risks associated with
# loading, using, modifying and/or developing or reproducing the software by
# the user in light of its specific status of free software, that may mean that
# it is complicated to manipulate, and that also therefore means that it is
# reserved for developers and experienced professionals having in-depth
# computer knowledge. Users are therefore encouraged to load and test the
# software's suitability as regards their requirements in conditions enabling
# the security of their systems and/or data to be ensured and, more generally,
# to use and operate it in the same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
# -----------------------------------------------------------------------------
import os
import numpy as np
from parameters import *
def disc(shape=(1024, 1024), center=(512, 512), radius=512):
    """Return a float array of the given shape: 1.0 strictly inside the disc, 0.0 elsewhere."""

    def squared_distance(row, col):
        # Squared Euclidean distance of each grid point from the disc centre
        return (row - center[0]) ** 2 + (col - center[1]) ** 2

    dist2 = np.fromfunction(squared_distance, shape)
    # Strict inequality: points exactly on the radius are outside the disc
    return np.where(dist2 < radius * radius, 1.0, 0.0)
def gaussian(shape=(25,25), width=0.5, center=0.0):
    """Generate a gaussian of the form g(x) = height*exp(-(x-center)**2/width**2).

    Scalar arguments are broadcast to one value per axis; each axis is rescaled
    to the interval [-1, 1] before the offset from the centre is measured.
    """
    # Promote scalar arguments to per-axis tuples
    if type(shape) in [float, int]:
        shape = (shape,)
    if type(width) in [float, int]:
        width = (width,) * len(shape)
    if type(center) in [float, int]:
        center = (center,) * len(shape)
    # Coordinate grid covering the requested shape
    coords = np.mgrid[tuple(slice(0, size) for size in shape)]
    exponent = np.zeros(shape)
    for axis, size in enumerate(shape):
        # Axes of length 1 carry no spatial information and are skipped
        if size > 1:
            normalised = (coords[axis] / float(size - 1)) * 2 - 1
            exponent += ((normalised - center[axis]) / width[axis]) ** 2
    return np.exp(-exponent / 2)
def stimulus(position, size, intensity):
    """
    Parameters
    ----------
    position : (rho,theta) (degrees)
    size : float (degrees)
    intensity: float

    Notes
    -----
    Relies on the globals ``cartesian`` and ``shape`` provided by the
    ``parameters`` star-import at the top of this module.
    NOTE(review): <intensity> is currently unused by this function.
    """
    # Convert the polar (rho, theta) position (degrees) to cartesian coordinates
    x, y = cartesian(position[0] / 90.0, np.pi * position[1] / 180.0)
    # Pixel grid over the global <shape>, normalised so X spans [0, 1] and Y spans [-1, 1]
    Y, X = np.mgrid[0:shape[0], 0:shape[1]]
    X = X / float(shape[1])
    Y = 2 * Y / float(shape[0]) - 1
    squared_dist = (X - x) ** 2 + (Y - y) ** 2
    return np.exp(-0.5 * squared_dist / (size / 90.0))
def best_fft_shape(shape):
    """
    Return the smallest shape >= the given shape whose axis lengths are products
    of small primes, making FFT computation efficient.

    From fftw.org:
        FFTW is best at handling sizes of the form 2^a*3^b*5^c*7^d*11^e*13^f,
        where e+f is either 0 or 1.
    From http://www.netlib.org/fftpack/doc:
        "the method is most efficient when n is a product of small primes."

    :param shape: scalar or sequence of axis lengths
    :return: int array of adjusted axis lengths
    :raises RuntimeError: if any axis length is zero
    """
    # fftpack (not sure of the base)
    base = [13, 11, 7, 5, 3, 2]
    # fftw
    # base = [13,11,7,5,3,2]

    def factorize(n):
        """Return the factors of n drawn from <base>, or [] if n has other prime factors."""
        if n == 0:
            # Bug fix: 'raise (RuntimeError, msg)' raised a TypeError under Python 3
            raise RuntimeError("Length n must be positive integer")
        elif n == 1:
            return [1]
        factors = []
        for b in base:
            while n % b == 0:
                # Bug fix: use floor division; '/' produced floats under Python 3,
                # making the modulo/equality tests fragile for large n
                n //= b
                factors.append(b)
        if n == 1:
            return factors
        return []

    def is_optimal(n):
        # fftpack: any complete factorization over <base> is acceptable
        # fftw would additionally reject factor pairs [13,13], [13,11], [11,11]
        return len(factorize(n)) > 0

    shape = np.atleast_1d(np.array(shape))
    # Grow each axis until its length factorizes over <base>
    for i in range(shape.size):
        while not is_optimal(shape[i]):
            shape[i] += 1
    return shape.astype(int)
| [
"numpy.zeros",
"numpy.where",
"numpy.array",
"numpy.exp",
"numpy.fromfunction"
] | [((1963, 1995), 'numpy.fromfunction', 'np.fromfunction', (['distance', 'shape'], {}), '(distance, shape)\n', (1978, 1995), True, 'import numpy as np\n'), ((2006, 2045), 'numpy.where', 'np.where', (['(D < radius * radius)', '(1.0)', '(0.0)'], {}), '(D < radius * radius, 1.0, 0.0)\n', (2014, 2045), True, 'import numpy as np\n'), ((2495, 2510), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (2503, 2510), True, 'import numpy as np\n'), ((2655, 2669), 'numpy.exp', 'np.exp', (['(-R / 2)'], {}), '(-R / 2)\n', (2661, 2669), True, 'import numpy as np\n'), ((3047, 3079), 'numpy.exp', 'np.exp', (['(-0.5 * R / (size / 90.0))'], {}), '(-0.5 * R / (size / 90.0))\n', (3053, 3079), True, 'import numpy as np\n'), ((4148, 4163), 'numpy.array', 'np.array', (['shape'], {}), '(shape)\n', (4156, 4163), True, 'import numpy as np\n')] |
import logging
import os
import re
from itertools import chain
from threading import Lock
import nltk
import numpy as np
import requests
from gensim.models import Word2Vec
from nltk import word_tokenize, WordNetLemmatizer, SnowballStemmer
from nltk.corpus import wordnet, stopwords
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
# Module-level logger, named after this module
logger = logging.getLogger(__name__)
# Ensure that modules are downloaded in advance
# nltk averaged_perceptron_tagger required for nltk.pos_tag
# nltk punkt required for word_tokenize
# nltk stopwords
# nltk wordnet
# English stop words, used to discard uninformative tokens during tokenization
NLTK_STOP_WORDS_SET = set(stopwords.words('english'))
# Lock to support multithreading for NLTK
# See https://github.com/nltk/nltk/issues/1576
NLTK_LOCK = Lock()
def vectorize_corpus(df, max_features, min_df, max_df, test=False):
    """
    Create vectorization for papers in df.

    :param df: papers dataframe
    :param max_features: Maximum vocabulary size
    :param min_df: Ignore tokens with frequency lower than given threshold
    :param max_df: Ignore tokens with frequency higher than given threshold
    :param test: If True, ignore the max_df threshold (use 1.0)
    :return: Return list of list of sentences for each paper, tokens, and counts matrix
    :raises ValueError: if no vocabulary can be built even with fully relaxed thresholds
    """
    papers_sentences_corpus = build_stemmed_corpus(df)
    logger.debug(f'Vectorize corpus of {len(df)} papers')
    counts = None
    while counts is None:
        try:
            vectorizer = CountVectorizer(
                min_df=min_df,
                max_df=max_df if not test else 1.0,
                max_features=max_features,
                preprocessor=lambda t: t,
                tokenizer=lambda t: t
            )
            # Each paper's sentences are flattened into a single token stream for counting
            counts = vectorizer.fit_transform([list(chain(*sentences)) for sentences in papers_sentences_corpus])
        except ValueError:
            # Bug fix: catch only the ValueError CountVectorizer raises ('After pruning, no terms
            # remain' / 'empty vocabulary') instead of a bare except, which also swallowed
            # KeyboardInterrupt and genuine programming errors.
            if min_df == 0.0 and max_df == 1.0:
                # Thresholds cannot be relaxed any further -- re-raise instead of looping forever
                raise
            logger.debug(f'Failed to build counts for vector for min_df={min_df}, max_df={max_df}, adjusting')
            min_df = max(0.0, min_df - 0.1)
            max_df = min(1.0, max_df + 0.1)
    logger.debug(f'Vectorized corpus size {counts.shape}')
    tokens_counts = np.asarray(np.sum(counts, axis=0)).reshape(-1)
    # Per-token document frequency statistics, logged for diagnostics only
    tokens_freqs = tokens_counts / len(df)
    logger.debug(f'Tokens frequencies min={tokens_freqs.min()}, max={tokens_freqs.max()}, '
                 f'mean={tokens_freqs.mean()}, std={tokens_freqs.std()}')
    corpus_tokens = vectorizer.get_feature_names()
    corpus_tokens_set = set(corpus_tokens)
    # Filter tokens left after vectorization
    filtered_corpus = [
        [[t for t in sentence if t in corpus_tokens_set] for sentence in paper_sentences]
        for paper_sentences in papers_sentences_corpus
    ]
    return filtered_corpus, corpus_tokens, counts
def get_frequent_tokens(tokens, fraction=0.1, min_tokens=20):
    """
    Compute tokens weighted frequencies
    :param tokens: list of tokens
    :param fraction: fraction of most common tokens to keep
    :param min_tokens: minimal number of tokens to return
    :return: dictionary {token: frequency}
    """
    counter = nltk.Counter(tokens)
    vocabulary_size = len(counter)
    top_n = max(min_tokens, int(vocabulary_size * fraction))
    # Frequencies are normalized by the number of distinct tokens.
    return {token: count / vocabulary_size for token, count in counter.most_common(top_n)}
# Convert pos_tag output to WordNetLemmatizer tags
# Maps the first two letters of an NLTK POS tag (JJ/NN/VB/RB) to the
# corresponding WordNet part-of-speech constant used by the lemmatizer.
# Idiom fix: `with NLTK_LOCK:` replaces the manual acquire/try/finally/release.
with NLTK_LOCK:
    NLTK_POS_TAG_TO_WORDNET = dict(JJ=wordnet.ADJ, NN=wordnet.NOUN, VB=wordnet.VERB, RB=wordnet.ADV)
def stemmed_tokens(text, min_token_length=4):
    """Return (stem, lemma) pairs for the content words of `text`.

    Apostrophes/hyphens are collapsed to spaces before tokenization; stop
    words, short tokens and POS tags outside NLTK_POS_TAG_TO_WORDNET are
    dropped; surviving tokens are lemmatized and then stemmed.
    """
    normalized = re.sub(r'[\'-]+', ' ', text.lower())
    lemmatizer = WordNetLemmatizer()
    lemmas = []
    for word, tag in nltk.pos_tag(word_tokenize(normalized)):
        tag2 = tag[:2]
        if len(word) < min_token_length:
            continue
        if word in NLTK_STOP_WORDS_SET or tag2 not in NLTK_POS_TAG_TO_WORDNET:
            continue
        lemmas.append(lemmatizer.lemmatize(word, pos=NLTK_POS_TAG_TO_WORDNET[tag2]))
    # Stemming reduces word length; the shortest matching word is chosen later.
    stemmer = SnowballStemmer('english')
    return [(stemmer.stem(lemma), lemma) for lemma in lemmas]
def build_stemmed_corpus(df):
    """ Tokenization is done in several steps
    1. Lemmatization: Ignore stop words, take into accounts nouns and adjectives, fix plural forms
    2. Stemming: reducing words
    3. Matching stems to a shortest existing lemma in texts
    :param df: papers dataframe with 'title', 'abstract', 'mesh' and 'keywords' columns
    :return: list (per paper) of lists (per sentence) of (stem-mapped) tokens
    """
    logger.info(f'Building corpus from {len(df)} papers')
    logger.info(f'Processing stemming for all papers')
    papers_stemmed_sentences = [None] * len(df)
    # NOTE: we split mesh and keywords by commas into separate sentences.
    # BUG FIX: Series.replace(',', '.') only replaces cells wholly equal to ','.
    # The .str accessor performs the intended substring replacement so the
    # split('.') below actually breaks comma-separated terms into sentences.
    for i, (title, abstract, mesh, keywords) in enumerate(zip(df['title'],
                                                              df['abstract'],
                                                              df['mesh'].str.replace(',', '.', regex=False),
                                                              df['keywords'].str.replace(',', '.', regex=False))):
        if i % 1000 == 1:
            logger.debug(f'Processed {i} papers')
        papers_stemmed_sentences[i] = [
            stemmed_tokens(sentence)
            for sentence in f'{title}.{abstract}.{mesh}.{keywords}'.split('.')
            if len(sentence.strip()) > 0
        ]
    logger.debug(f'Done processing stemming for {len(df)} papers')
    logger.info('Creating global shortest stem to word map')
    stems_tokens_map = _build_stems_to_tokens_map(chain(*chain(*papers_stemmed_sentences)))
    logger.info('Creating stemmed corpus')
    # Replace each stem with its globally shortest lemma (fall back to the stem itself).
    return [[[stems_tokens_map.get(s, s) for s, _ in stemmed] for stemmed in sentence]
            for sentence in papers_stemmed_sentences]
def _build_stems_to_tokens_map(stems_and_tokens):
""" Build a map to substitute each stem with the shortest word if word is different """
stems_tokens_map = {}
for stem, token in stems_and_tokens:
if stem != token: # Ignore tokens similar to stems
if stem in stems_tokens_map:
if len(stems_tokens_map[stem]) > len(token):
stems_tokens_map[stem] = token
else:
stems_tokens_map[stem] = token
return stems_tokens_map
# Launch with Docker address or locally
# Base URL of the fasttext embeddings microservice queried by tokens_embeddings().
FASTTEXT_URL = os.getenv('FASTTEXT_URL', 'http://localhost:8081')
def tokens_embeddings(corpus, corpus_tokens, test=False):
    """Return a [len(corpus_tokens) x 300] embeddings matrix.

    Fetches fasttext embeddings from the shared microservice; on any failure
    (bad status, network error) falls back to training word2vec in-process.
    :param corpus: list (per paper) of lists of sentences
    :param corpus_tokens: vocabulary tokens, in matrix row order
    :param test: if True, skip the service and train word2vec directly
    """
    if test:
        logger.debug(f'Compute words embeddings trained word2vec')
        return train_word2vec(corpus, corpus_tokens, test=test)
    # Don't use model as is, since each celery process will load it's own copy.
    # Shared model is available via additional service with single model.
    logger.debug(f'Fetch embeddings from microservice')
    try:
        r = requests.request(
            url=f'{FASTTEXT_URL}/fasttext',
            method='GET',
            json=corpus_tokens,
            headers={'Accept': 'application/json'}
        )
        if r.status_code == 200:
            return np.array(r.json()['embeddings']).reshape(len(corpus_tokens), 300)
        else:
            logger.debug(f'Wrong response code {r.status_code}')
    except Exception as e:
        # BUG FIX: Python 3 exceptions have no `.message`; the old
        # f'...${e.message}' raised AttributeError inside the handler and
        # masked the original network error.
        logger.debug(f'Failed to fetch embeddings {e}')
    logger.debug('Fallback to in-house word2vec')
    return train_word2vec(corpus, corpus_tokens, test=test)
def train_word2vec(corpus, corpus_tokens, vector_size=64, test=False):
    """Train a word2vec model on the corpus sentences and return one embedding
    row per token in `corpus_tokens` (zeros for tokens the model never saw)."""
    logger.debug('Collecting sentences across dataset')
    # Sentences shorter than the word2vec window (5) are dropped outside tests.
    sentences = [s for s in chain.from_iterable(corpus) if test or len(s) >= 5]
    logger.debug(f'Total {len(sentences)} sentences')
    logger.debug('Training word2vec model')
    model = Word2Vec(sentences, vector_size=vector_size, window=5, min_count=0, workers=1, seed=42)
    logger.debug('Retrieve word embeddings, corresponding subjects and reorder according to corpus_terms')
    vectors = model.wv.vectors
    token_to_row = {token: row for row, token in enumerate(model.wv.index_to_key)}
    rows = []
    for t in corpus_tokens:
        if t in token_to_row:
            rows.append(vectors[token_to_row[t]])
        else:
            rows.append(np.zeros(vectors.shape[1]))  # missing embedding
    return np.array(rows)
def texts_embeddings(corpus_counts, tokens_w2v_embeddings):
    """
    Computes texts embeddings as TF-IDF weighted average of word2vec words embeddings.
    :param corpus_counts: Vectorized papers matrix
    :param tokens_w2v_embeddings: Tokens word2vec embeddings
    :return: numpy array [publications x embeddings]
    """
    logger.debug('Compute TF-IDF on tokens counts')
    transformer = TfidfTransformer()
    tfidf = transformer.fit_transform(corpus_counts)
    logger.debug(f'TFIDF shape {tfidf.shape}')
    logger.debug('Compute text embeddings as TF-IDF weighted average of word2vec tokens embeddings')
    per_paper = []
    for i in range(tfidf.shape[0]):
        # Weight each token embedding by the paper's TF-IDF row, then average.
        per_paper.append(np.mean((tokens_w2v_embeddings.T * tfidf[i, :].T).T, axis=0))
    result = np.array(per_paper)
    logger.debug(f'Texts embeddings shape: {result.shape}')
    return result
| [
"itertools.chain.from_iterable",
"sklearn.feature_extraction.text.CountVectorizer",
"numpy.sum",
"nltk.WordNetLemmatizer",
"nltk.SnowballStemmer",
"gensim.models.Word2Vec",
"numpy.zeros",
"threading.Lock",
"nltk.Counter",
"numpy.mean",
"nltk.corpus.stopwords.words",
"itertools.chain",
"nltk.... | [((371, 398), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (388, 398), False, 'import logging\n'), ((736, 742), 'threading.Lock', 'Lock', ([], {}), '()\n', (740, 742), False, 'from threading import Lock\n'), ((6335, 6385), 'os.getenv', 'os.getenv', (['"""FASTTEXT_URL"""', '"""http://localhost:8081"""'], {}), "('FASTTEXT_URL', 'http://localhost:8081')\n", (6344, 6385), False, 'import os\n'), ((606, 632), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (621, 632), False, 'from nltk.corpus import wordnet, stopwords\n'), ((3060, 3080), 'nltk.Counter', 'nltk.Counter', (['tokens'], {}), '(tokens)\n', (3072, 3080), False, 'import nltk\n'), ((3711, 3730), 'nltk.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (3728, 3730), False, 'from nltk import word_tokenize, WordNetLemmatizer, SnowballStemmer\n'), ((4130, 4156), 'nltk.SnowballStemmer', 'SnowballStemmer', (['"""english"""'], {}), "('english')\n", (4145, 4156), False, 'from nltk import word_tokenize, WordNetLemmatizer, SnowballStemmer\n'), ((7789, 7881), 'gensim.models.Word2Vec', 'Word2Vec', (['sentences'], {'vector_size': 'vector_size', 'window': '(5)', 'min_count': '(0)', 'workers': '(1)', 'seed': '(42)'}), '(sentences, vector_size=vector_size, window=5, min_count=0, workers\n =1, seed=42)\n', (7797, 7881), False, 'from gensim.models import Word2Vec\n'), ((8659, 8677), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {}), '()\n', (8675, 8677), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n'), ((6822, 6949), 'requests.request', 'requests.request', ([], {'url': 'f"""{FASTTEXT_URL}/fasttext"""', 'method': '"""GET"""', 'json': 'corpus_tokens', 'headers': "{'Accept': 'application/json'}"}), "(url=f'{FASTTEXT_URL}/fasttext', method='GET', json=\n corpus_tokens, headers={'Accept': 'application/json'})\n", (6838, 6949), False, 'import requests\n'), ((1404, 
1550), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'min_df': 'min_df', 'max_df': '(max_df if not test else 1.0)', 'max_features': 'max_features', 'preprocessor': '(lambda t: t)', 'tokenizer': '(lambda t: t)'}), '(min_df=min_df, max_df=max_df if not test else 1.0,\n max_features=max_features, preprocessor=lambda t: t, tokenizer=lambda t: t)\n', (1419, 1550), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\n'), ((3843, 3863), 'nltk.pos_tag', 'nltk.pos_tag', (['tokens'], {}), '(tokens)\n', (3855, 3863), False, 'import nltk\n'), ((7651, 7678), 'itertools.chain.from_iterable', 'chain.from_iterable', (['corpus'], {}), '(corpus)\n', (7670, 7678), False, 'from itertools import chain\n'), ((8928, 8988), 'numpy.mean', 'np.mean', (['(tokens_w2v_embeddings.T * tfidf[i, :].T).T'], {'axis': '(0)'}), '((tokens_w2v_embeddings.T * tfidf[i, :].T).T, axis=0)\n', (8935, 8988), True, 'import numpy as np\n'), ((2131, 2153), 'numpy.sum', 'np.sum', (['counts'], {'axis': '(0)'}), '(counts, axis=0)\n', (2137, 2153), True, 'import numpy as np\n'), ((5542, 5574), 'itertools.chain', 'chain', (['*papers_stemmed_sentences'], {}), '(*papers_stemmed_sentences)\n', (5547, 5574), False, 'from itertools import chain\n'), ((8155, 8184), 'numpy.zeros', 'np.zeros', (['embeddings.shape[1]'], {}), '(embeddings.shape[1])\n', (8163, 8184), True, 'import numpy as np\n'), ((1693, 1710), 'itertools.chain', 'chain', (['*sentences'], {}), '(*sentences)\n', (1698, 1710), False, 'from itertools import chain\n')] |
import numpy as np
import scipy.linalg as la
from bh_sne import BH_SNE
def bh_sne(
    data,
    pca_d=None,
    d=2,
    perplexity=30.0,
    theta=0.5,
    random_state=None,
    copy_data=False,
    verbose=False,
):
    """
    Run Barnes-Hut T-SNE on _data_.
    @param data         The data.
    @param pca_d        The dimensionality of data is reduced via PCA
                        to this dimensionality.
    @param d            The embedding dimensionality. Must be fixed to 2.
    @param perplexity   The perplexity controls the effective number of
                        neighbors.
    @param theta        If set to 0, exact t-SNE is run, which takes
                        very long for dataset > 5000 samples.
    @param random_state A numpy RandomState object; if None, use
                        the numpy.random singleton. Init the RandomState
                        with a fixed seed to obtain consistent results
                        from run to run.
    @param copy_data    Copy the data to prevent it from being modified
                        by the C code
    @param verbose      Verbose output from the training process
    """
    N, _ = data.shape
    if pca_d is None:
        # No PCA requested: pass the data straight through (optionally copied).
        X = np.copy(data) if copy_data else data
    else:
        # PCA via SVD of the covariance matrix --
        # much faster than an SVD of the data matrix itself.
        data -= data.mean(axis=0)
        cov = np.dot(data.T, data) / N
        u = la.svd(cov, full_matrices=False)[0]
        X = np.dot(data, u[:, 0:pca_d])
    # The C implementation takes an integer seed rather than a RandomState.
    if random_state is None:
        seed = np.random.randint(2 ** 32 - 1)
    else:
        seed = random_state.randint(2 ** 32 - 1)
    return BH_SNE().run(X, N, X.shape[1], d, perplexity, theta, seed, verbose)
| [
"numpy.copy",
"bh_sne.BH_SNE",
"scipy.linalg.svd",
"numpy.random.randint",
"numpy.dot"
] | [((1785, 1793), 'bh_sne.BH_SNE', 'BH_SNE', ([], {}), '()\n', (1791, 1793), False, 'from bh_sne import BH_SNE\n'), ((1548, 1580), 'scipy.linalg.svd', 'la.svd', (['cov'], {'full_matrices': '(False)'}), '(cov, full_matrices=False)\n', (1554, 1580), True, 'import scipy.linalg as la\n'), ((1619, 1634), 'numpy.dot', 'np.dot', (['data', 'u'], {}), '(data, u)\n', (1625, 1634), True, 'import numpy as np\n'), ((1680, 1710), 'numpy.random.randint', 'np.random.randint', (['(2 ** 32 - 1)'], {}), '(2 ** 32 - 1)\n', (1697, 1710), True, 'import numpy as np\n'), ((1278, 1291), 'numpy.copy', 'np.copy', (['data'], {}), '(data)\n', (1285, 1291), True, 'import numpy as np\n'), ((1505, 1525), 'numpy.dot', 'np.dot', (['data.T', 'data'], {}), '(data.T, data)\n', (1511, 1525), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
#
# Copyright (c) <NAME> and the University of Texas MD Anderson Cancer Center
# Distributed under the terms of the 3-clause BSD License.
from collections import Sequence
from sos.utils import short_repr, env
import numpy
import pandas
import json
# Ruby bootstrap code executed in the Ruby kernel at startup: loads daru and
# nmatrix, and defines __Ruby_py_repr, which serializes a Ruby object into a
# Python literal string (used by sos_Ruby.put_vars to transfer variables back).
Ruby_init_statement = r'''
require 'daru'
require 'nmatrix'
def __Ruby_py_repr(obj)
if obj.is_a? Integer
return obj.inspect
elsif obj.is_a? String
return obj.inspect
elsif obj.is_a? TrueClass
return "True"
elsif obj.is_a? FalseClass
return "False"
elsif obj.is_a? Float
return obj.inspect
elsif obj.nil?
return "None"
elsif obj.is_a? Set
return "{" + (obj.map { |indivial_var| __Ruby_py_repr(indivial_var) } ).join(",") + "}"
elsif obj.is_a? Range
return "range(" + obj.min().inspect + "," + (obj.max()+1).inspect + ")"
elsif obj.is_a? Array
return '[' + (obj.map { |indivial_var| __Ruby_py_repr(indivial_var) } ).join(",") + ']'
elsif obj.is_a? Hash
_beginning_result_string_hash_from_ruby = "{"
_context_result_string_hash_from_ruby = (obj.keys.map do |x|
if obj[x].is_a? Array then
"\"" + x.to_s + "\":" + (obj[x].to_a.map { |y| eval(__Ruby_py_repr(y)) }).to_s
else
"\"" + x.to_s + "\":" + (__Ruby_py_repr(obj[x])).to_s
end
end).join(",") + "}"
_result_string_hash_from_ruby = _beginning_result_string_hash_from_ruby + _context_result_string_hash_from_ruby
return _result_string_hash_from_ruby
elsif obj.is_a? Daru::DataFrame
_beginning_result_string_dataframe_from_ruby = "pandas.DataFrame(" + "{"
_context_result_string_dataframe_from_ruby = (obj.vectors.to_a.map { |x| "\"" + x.to_s + "\":" + (obj[x].to_a.map { |y| eval(__Ruby_py_repr(y)) }).to_s } ).join(",")
_indexing_result_string_dataframe_from_ruby = "}," + "index=" + obj.index.to_a.to_s + ")"
_result_string_dataframe_from_ruby = _beginning_result_string_dataframe_from_ruby + _context_result_string_dataframe_from_ruby + _indexing_result_string_dataframe_from_ruby
return _result_string_dataframe_from_ruby
elsif obj.is_a? NMatrix
return "numpy.matrix(" + obj.to_a.to_s + ")"
elsif obj.is_a? Complex
return "complex(" + obj.real.inspect + "," + obj.imaginary.inspect + ")"
else
return "'Untransferrable variable'"
end
end
'''
#
# support for %get
#
# Converting a Python object to a Ruby literal expression to be evaluated by Ruby
#
class sos_Ruby:
    """SoS language plugin for a Ruby Jupyter kernel.

    ``get_vars`` converts Python objects to Ruby literal strings (via
    ``_Ruby_repr``) and assigns them in the Ruby kernel; ``put_vars`` asks the
    kernel to serialize Ruby objects back into Python literals (via the Ruby
    helper ``__Ruby_py_repr``) and evaluates them here.
    """
    supported_kernels = {'Ruby': ['ruby']}
    background_color = '#e8c2be'
    options = {}
    cd_command = 'Dir.chdir {dir!r}'
    def __init__(self, sos_kernel, kernel_name='ruby'):
        self.sos_kernel = sos_kernel
        self.kernel_name = kernel_name
        self.init_statements = Ruby_init_statement
    def _Ruby_repr(self, obj):
        """Return a Ruby literal string representing the Python object *obj*."""
        # bool must be tested before int: in Python, bool is a subclass of int.
        if isinstance(obj, bool):
            return 'true' if obj else 'false'
        # NaN must be tested before the generic float branch.
        elif isinstance(obj, float) and numpy.isnan(obj):
            return "Float::NAN"
        elif isinstance(obj, (int, float)):
            return repr(obj)
        elif isinstance(obj, str):
            # Ruby %(...) string literal
            return '%(' + obj + ')'
        elif isinstance(obj, complex):
            return 'Complex(' + str(obj.real) + ',' + str(obj.imag) + ')'
        elif isinstance(obj, range):
            return '(' + repr(min(obj)) + '...' + repr(max(obj)) + ')'
        elif isinstance(obj, Sequence):
            if len(obj) == 0:
                return '[]'
            else:
                return '[' + ','.join(self._Ruby_repr(x) for x in obj) + ']'
        elif obj is None:
            return 'nil'
        elif isinstance(obj, dict):
            return '{' + ','.join('"{}" => {}'.format(x, self._Ruby_repr(y)) for x, y in obj.items()) + '}'
        elif isinstance(obj, set):
            return 'Set[' + ','.join(self._Ruby_repr(x) for x in obj) + ']'
        else:
            # numpy scalar types print as plain numbers
            if isinstance(obj, (numpy.intc, numpy.intp, numpy.int8, numpy.int16, numpy.int32, numpy.int64,
                                numpy.uint8, numpy.uint16, numpy.uint32, numpy.uint64,
                                numpy.float16, numpy.float32, numpy.float64)):
                return repr(obj)
            elif isinstance(obj, numpy.matrixlib.defmatrix.matrix):
                # NMatrix literal: N[[...], ...]
                return 'N' + repr(obj.tolist())
            elif isinstance(obj, numpy.ndarray):
                return repr(obj.tolist())
            elif isinstance(obj, pandas.DataFrame):
                # Build a Daru::DataFrame literal: columns as "name"=>[values],
                # with single quotes normalized to double quotes for Ruby.
                _beginning_result_string_dataframe_to_ruby = "Daru::DataFrame.new({"
                _context_string_dataframe_to_ruby = str(
                    ['"' + str(x).replace("'", '"') + '"' + "=>" + "["
                     + str(",".join(list(map(lambda y: self._Ruby_repr(y), obj[x].tolist())))).replace("'", '"')
                     + "]"
                     for x in obj.keys().tolist()]
                )[2:-2].replace("\', \'", ", ") + "},"
                _indexing_result_string_dataframe_to_ruby = "index:" + str(obj.index.values.tolist()).replace("'", '"') + ")"
                _result_string_dataframe_to_ruby = _beginning_result_string_dataframe_to_ruby + _context_string_dataframe_to_ruby + _indexing_result_string_dataframe_to_ruby
                return _result_string_dataframe_to_ruby
            elif isinstance(obj, pandas.Series):
                dat = list(obj.values)
                ind = list(obj.index.values)
                ans = "{" + ",".join([repr(x) + "=>" + repr(y) for x, y in zip(ind, dat)]) + "}"
                return ans
            else:
                return repr('Unsupported datatype {}'.format(short_repr(obj)))
    def get_vars(self, names):
        """Transfer the named SoS variables into the Ruby kernel."""
        for name in names:
            newname = name
            ruby_repr = self._Ruby_repr(env.sos_dict[name])
            self.sos_kernel.run_cell('{} = {}'.format(newname, ruby_repr), True, False,
                                     on_error='Failed to put variable {} to Ruby'.format(name))
    def put_vars(self, items, to_kernel=None):
        """Transfer the listed Ruby variables (plus any ``sos*`` locals) back to SoS."""
        # first let us get all variables with names starting with sos
        try:
            response = self.sos_kernel.get_response('print local_variables', ('stream',), name=('stdout',))[0][1]
            all_vars = response['text']
            items += [x for x in all_vars[1:-1].split(", ") if x.startswith(":sos")]
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed KeyboardInterrupt.
            # if there is no variable with name sos, the command will not produce any output
            pass
        res = {}
        for item in items:
            py_repr = 'print(__Ruby_py_repr({}))'.format(item)
            response = self.sos_kernel.get_response(py_repr, ('stream',), name=('stdout',))[0][1]
            expr = response['text']
            self.sos_kernel.warn(repr(expr))
            try:
                # evaluate as raw string to correctly handle \\ etc
                # SECURITY NOTE: eval() of kernel output executes arbitrary code;
                # acceptable only because the Ruby kernel is user-controlled.
                res[item] = eval(expr)
            except Exception as e:
                self.sos_kernel.warn('Failed to evaluate {!r}: {}'.format(expr, e))
                return None
        return res
    def sessioninfo(self):
        """Return the Ruby version string reported by the kernel."""
        response = self.sos_kernel.get_response(r'RUBY_VERSION', ('stream',), name=('stdout',))
        return response['text']
| [
"numpy.isnan",
"sos.utils.short_repr"
] | [((3145, 3161), 'numpy.isnan', 'numpy.isnan', (['obj'], {}), '(obj)\n', (3156, 3161), False, 'import numpy\n'), ((6550, 6565), 'sos.utils.short_repr', 'short_repr', (['obj'], {}), '(obj)\n', (6560, 6565), False, 'from sos.utils import short_repr, env\n')] |
import math
import numpy as np
import torch
from torch.optim.optimizer import Optimizer, required
class RAdam(Optimizer):
    """Rectified Adam optimizer.

    Keeps a 10-slot rolling buffer of (step, N_sma, step_size) so the
    rectification term is recomputed only once per distinct step count.
    """
    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        # Cache of (step, N_sma, step_size) keyed by step % 10; shared by all params.
        self.buffer = [[None, None, None] for ind in range(10)]
        super(RAdam, self).__init__(params, defaults)
    def __setstate__(self, state):
        super(RAdam, self).__setstate__(state)
    def step(self, closure=None):
        """Perform a single optimization step.

        :param closure: optional callable that re-evaluates the model and
            returns the loss.
        :return: the loss returned by ``closure``, or ``None``.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None:
                    continue
                # All state math is done in float32, then copied back to p.data.
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError("RAdam does not support sparse gradients")
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if len(state) == 0:
                    # Lazy state init on first step for this parameter.
                    state["step"] = 0
                    state["exp_avg"] = torch.zeros_like(p_data_fp32)
                    state["exp_avg_sq"] = torch.zeros_like(p_data_fp32)
                else:
                    state["exp_avg"] = state["exp_avg"].type_as(p_data_fp32)
                    state["exp_avg_sq"] = state["exp_avg_sq"].type_as(p_data_fp32)
                exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
                beta1, beta2 = group["betas"]
                # Update biased first/second moment estimates.
                # NOTE(review): positional addcmul_/add_ scalar arguments are the
                # pre-1.5 torch signature (deprecated in newer releases) -- confirm
                # the pinned torch version.
                exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
                exp_avg.mul_(beta1).add_(1 - beta1, grad)
                state["step"] += 1
                buffered = self.buffer[int(state["step"] % 10)]
                if state["step"] == buffered[0]:
                    # Cache hit: reuse N_sma and step_size computed for this step count.
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state["step"]
                    beta2_t = beta2 ** state["step"]
                    # N_sma: length of the approximated SMA of the variance.
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state["step"] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma
                    # more conservative since it's an approximated value
                    if N_sma >= 5:
                        # Variance is tractable: apply the rectification term.
                        step_size = (
                            group["lr"]
                            * math.sqrt(
                                (1 - beta2_t)
                                * (N_sma - 4)
                                / (N_sma_max - 4)
                                * (N_sma - 2)
                                / N_sma
                                * N_sma_max
                                / (N_sma_max - 2)
                            )
                            / (1 - beta1 ** state["step"])
                        )
                    else:
                        # Early steps: fall back to an un-adapted (SGD-with-momentum-like) step.
                        step_size = group["lr"] / (1 - beta1 ** state["step"])
                    buffered[2] = step_size
                if group["weight_decay"] != 0:
                    # Decoupled weight decay: p -= wd * lr * p
                    p_data_fp32.add_(-group["weight_decay"] * group["lr"], p_data_fp32)
                # more conservative since it's an approximated value
                if N_sma >= 5:
                    denom = exp_avg_sq.sqrt().add_(group["eps"])
                    p_data_fp32.addcdiv_(-step_size, exp_avg, denom)
                else:
                    p_data_fp32.add_(-step_size, exp_avg)
                p.data.copy_(p_data_fp32)
        return loss
class EarlyStopping(object):
    """EarlyStop for pytorch
    refer to
    https://gist.github.com/stefanonardo/693d96ceb2f531fa05db530f3e21517d
    TODO check if fastai has buildin support for this

    Call ``step(metric)`` once per evaluation; it returns True when the metric
    has not improved for `patience` consecutive steps, or became NaN.
    """

    def __init__(self, mode="min", min_delta=0, patience=10, percentage=False):
        self.mode = mode
        self.min_delta = min_delta
        self.patience = patience
        self.best = None            # best metric value observed so far
        self.num_bad_epochs = 0     # consecutive epochs without improvement
        self.is_better = None
        self._init_is_better(mode, min_delta, percentage)
        if patience == 0:
            # patience 0 disables early stopping entirely
            self.is_better = lambda a, b: True
            self.step = lambda a: False

    def step(self, metrics):
        """Record one metric value; return True if training should stop."""
        if self.best is None:
            self.best = metrics
            return False
        if np.isnan(metrics):
            return True
        if self.is_better(metrics, self.best):
            self.best = metrics
            self.num_bad_epochs = 0
        else:
            self.num_bad_epochs += 1
        return self.num_bad_epochs >= self.patience

    def _init_is_better(self, mode, min_delta, percentage):
        """Install the comparison deciding whether a metric beats the best so far."""
        if mode not in {"min", "max"}:
            raise ValueError("mode " + mode + " is unknown!")

        def improved(a, best):
            # Absolute margin, or a percentage of the current best.
            delta = best * min_delta / 100 if percentage else min_delta
            if mode == "min":
                return a < best - delta
            return a > best + delta

        self.is_better = improved
| [
"torch.zeros_like",
"math.sqrt",
"numpy.isnan"
] | [((4293, 4310), 'numpy.isnan', 'np.isnan', (['metrics'], {}), '(metrics)\n', (4301, 4310), True, 'import numpy as np\n'), ((1113, 1142), 'torch.zeros_like', 'torch.zeros_like', (['p_data_fp32'], {}), '(p_data_fp32)\n', (1129, 1142), False, 'import torch\n'), ((1185, 1214), 'torch.zeros_like', 'torch.zeros_like', (['p_data_fp32'], {}), '(p_data_fp32)\n', (1201, 1214), False, 'import torch\n'), ((2379, 2491), 'math.sqrt', 'math.sqrt', (['((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma *\n N_sma_max / (N_sma_max - 2))'], {}), '((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) /\n N_sma * N_sma_max / (N_sma_max - 2))\n', (2388, 2491), False, 'import math\n')] |
import unyt as u
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.rc("font", family="serif")
def main():
    """Produce comparison figures for an SPC/E water chemical-potential study.

    Reads results_nd.csv (Notre Dame results) and results_ws.txt (Wayne State
    results) from the working directory and writes three PDFs:
    chempot-nd.pdf, chempot-compare.pdf and density-compare.pdf.
    """
    data_nd = pd.read_csv("results_nd.csv")
    # Figure 1: ND pressure vs. chemical potential, with 2-sigma error bars.
    fig, ax = plt.subplots()
    ax.errorbar(
        data_nd["mu-cassandra_kJmol"],
        data_nd["press_bar"],
        yerr=[2 * p for p in data_nd["press-stdev_bar"]],
        fmt="s",
        markersize=8,
        color="#0C2340",
        alpha=0.7,
    )
    ax.set_yscale("log")
    ax.set_xlabel("$\mu'$, kJ/mol", fontsize=14, labelpad=15)
    ax.set_ylabel("Pressure, bar", fontsize=14, labelpad=15)
    ax.tick_params(axis="both", which="major", labelsize=12)
    fig.tight_layout()
    fig.savefig("chempot-nd.pdf")
    # WS data carries units via unyt (chemical potential in K * kB, pressure in bar).
    data_ws = pd.read_csv("results_ws.txt", sep="\s+")
    mus_ws = data_ws["ChemPot_K"].values * u.K * u.kb
    press_ws = data_ws["P_bar"].values * u.bar
    # @300 K, https://www.nist.gov/mml/csd/informatics/sat-tmmc-liquid-vapor-coexistence-properties-spce-water-lrc
    psat_nist = 1.017e-02 * u.bar
    # Figure 2: pressure comparison (ND vs WS raw and shifted, plus NIST Psat).
    fig, ax = plt.subplots()
    # Plot ND results
    ax.scatter(
        data_nd["mu-cassandra_kJmol"],
        data_nd["press_bar"],
        marker="s",
        s=50,
        c="#0C2340",
        alpha=0.9,
        label="Notre Dame",
    )
    # Plot WS results
    ax.scatter(
        mus_ws.to_value("kJ/mol"),
        press_ws.to_value("bar"),
        marker="o",
        s=50,
        c="#406b46",
        alpha=0.4,
        label="Wayne State reported $\mu$",
    )
    # Plot shifted WS results
    # Shift WS chemical potentials by 3*kB*T*ln(Lambda), with Lambda the
    # thermal de Broglie wavelength of water at 298 K.
    # NOTE(review): the NIST reference comments above say @300 K while 298 K
    # is used here -- confirm the intended comparison temperature.
    mass_water = 18.015 * u.amu
    temperature = 298.0 * u.K
    debroglie = u.h / np.sqrt(2 * np.pi * mass_water * u.kb * temperature)
    ws_offset = 3 * u.kb * temperature * np.log(debroglie.to_value(u.angstrom))
    ax.scatter(
        mus_ws.to_value("kJ/mol") + ws_offset.to_value("kJ/mol"),
        press_ws.to_value("bar"),
        marker="o",
        s=50,
        c="#406b46",
        alpha=0.9,
        label="Wayne State $\mu + 3RTln(\Lambda)$",
    )
    # Plot NIST Pvap
    ax.axhline(
        psat_nist.to_value("bar"),
        color="black",
        ls="--",
        label="NIST SPC/E $P^{sat}$"
    )
    ax.set_yscale("log")
    ax.set_xlabel("$\mu'$, kJ/mol", fontsize=14, labelpad=15)
    ax.set_ylabel("Pressure, bar", fontsize=14, labelpad=15)
    ax.tick_params(axis="both", which="major", labelsize=12)
    ax.legend()
    fig.tight_layout()
    fig.savefig("chempot-compare.pdf")
    # Convert mass densities to molar (number) densities via the water molar mass.
    mass_density_ws = data_ws["Density_kg_per_mcubed"].values * u.kg / u.m ** 3
    density_ws = mass_density_ws / mass_water
    # @300 K, https://www.nist.gov/mml/csd/informatics/sat-tmmc-liquid-vapor-coexistence-properties-spce-water-lrc
    mass_density_nist = 7.373e-03 * u.kg / u.m ** 3
    density_nist = mass_density_nist / mass_water
    # Figure 3: vapor-density comparison (ND vs shifted WS, plus NIST value).
    fig, ax = plt.subplots()
    # Plot ND results
    ax.scatter(
        data_nd["mu-cassandra_kJmol"],
        (data_nd["density_molec-nm^3"].values / u.nm ** 3).to_value(
            "mol/dm**3"
        ),
        marker="s",
        s=50,
        c="#0C2340",
        alpha=0.9,
        label="<NAME>",
    )
    # Plot WS results
    ax.scatter(
        mus_ws.to_value("kJ/mol") + ws_offset.to_value("kJ/mol"),
        density_ws.to_value("mol/dm**3"),
        marker="o",
        s=50,
        c="#406b46",
        alpha=0.9,
        label="Wayne State $\mu + 3RTln(\Lambda)$",
    )
    # Plot NIST SPC/E results
    ax.axhline(
        density_nist.to_value("mol/dm**3"),
        color="black",
        ls="--",
        label=r"NIST SPC/E $\rho^{vap}$",
    )
    ax.set_yscale("log")
    ax.set_xlabel("$\mu'$, kJ/mol", fontsize=14, labelpad=15)
    ax.set_ylabel("Density, mol/dm$^3$", fontsize=14, labelpad=15)
    ax.tick_params(axis="both", which="major", labelsize=12)
    ax.legend()
    fig.tight_layout()
    fig.savefig("density-compare.pdf")
if __name__ == "__main__":
    main()
| [
"pandas.read_csv",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.rc",
"numpy.sqrt"
] | [((89, 119), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (95, 119), True, 'import matplotlib.pyplot as plt\n'), ((149, 178), 'pandas.read_csv', 'pd.read_csv', (['"""results_nd.csv"""'], {}), "('results_nd.csv')\n", (160, 178), True, 'import pandas as pd\n'), ((193, 207), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (205, 207), True, 'import matplotlib.pyplot as plt\n'), ((723, 764), 'pandas.read_csv', 'pd.read_csv', (['"""results_ws.txt"""'], {'sep': '"""\\\\s+"""'}), "('results_ws.txt', sep='\\\\s+')\n", (734, 764), True, 'import pandas as pd\n'), ((1030, 1044), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1042, 1044), True, 'import matplotlib.pyplot as plt\n'), ((2795, 2809), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2807, 2809), True, 'import matplotlib.pyplot as plt\n'), ((1605, 1657), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * mass_water * u.kb * temperature)'], {}), '(2 * np.pi * mass_water * u.kb * temperature)\n', (1612, 1657), True, 'import numpy as np\n')] |
import warnings
from xml.etree.ElementTree import Element
from base64 import b64encode
import types
from imageio import imwrite
import numpy as np
from copy import copy
from scipy import ndimage as ndi
import vispy.color
from ..base import Layer
from ..layer_utils import calc_data_range, increment_unnamed_colormap
from ...util.event import Event
from ...util.status_messages import format_float
from ._constants import Rendering, Interpolation
from ...util.colormaps import make_colorbar, AVAILABLE_COLORMAPS
from .image_utils import get_pyramid_and_rgb
class Image(Layer):
"""Image layer.
Parameters
----------
data : array or list of array
Image data. Can be N dimensional. If the last dimension has length
3 or 4 can be interpreted as RGB or RGBA if rgb is `True`. If a
list and arrays are decreasing in shape then the data is treated as
an image pyramid.
rgb : bool
Whether the image is rgb RGB or RGBA. If not specified by user and
the last dimension of the data has length 3 or 4 it will be set as
`True`. If `False` the image is interpreted as a luminance image.
is_pyramid : bool
Whether the data is an image pyramid or not. Pyramid data is
represented by a list of array like image data. If not specified by
the user and if the data is a list of arrays that decrease in shape
then it will be taken to be a pyramid. The first image in the list
should be the largest.
colormap : str, vispy.Color.Colormap, tuple, dict
Colormap to use for luminance images. If a string must be the name
of a supported colormap from vispy or matplotlib. If a tuple the
first value must be a string to assign as a name to a colormap and
the second item must be a Colormap. If a dict the key must be a
string to assign as a name to a colormap and the value must be a
Colormap.
contrast_limits : list (2,)
Color limits to be used for determining the colormap bounds for
luminance images. If not passed is calculated as the min and max of
the image.
gamma : float
Gamma correction for determining colormap linearity. Defaults to 1.
interpolation : str
Interpolation mode used by vispy. Must be one of our supported
modes.
iso_threshold : float
Threshold for isosurface.
name : str
Name of the layer.
metadata : dict
Layer metadata.
scale : tuple of float
Scale factors for the layer.
translate : tuple of float
Translation values for the layer.
opacity : float
Opacity of the layer visual, between 0.0 and 1.0.
blending : str
One of a list of preset blending modes that determines how RGB and
alpha values of the layer visual get mixed. Allowed values are
{'opaque', 'translucent', and 'additive'}.
visible : bool
Whether the layer visual is currently being displayed.
Attributes
----------
data : array
Image data. Can be N dimensional. If the last dimension has length
3 or 4 can be interpreted as RGB or RGBA if rgb is `True`. If a list
and arrays are decreaing in shape then the data is treated as an
image pyramid.
metadata : dict
Image metadata.
rgb : bool
Whether the image is rgb RGB or RGBA if rgb. If not
specified by user and the last dimension of the data has length 3 or 4
it will be set as `True`. If `False` the image is interpreted as a
luminance image.
is_pyramid : bool
Whether the data is an image pyramid or not. Pyramid data is
represented by a list of array like image data. The first image in the
list should be the largest.
colormap : 2-tuple of str, vispy.color.Colormap
The first is the name of the current colormap, and the second value is
the colormap. Colormaps are used for luminance images, if the image is
rgb the colormap is ignored.
colormaps : tuple of str
Names of the available colormaps.
contrast_limits : list (2,) of float
Color limits to be used for determining the colormap bounds for
luminance images. If the image is rgb the contrast_limits is ignored.
contrast_limits_range : list (2,) of float
Range for the color limits for luminace images. If the image is
rgb the contrast_limits_range is ignored.
gamma : float
Gamma correction for determining colormap linearity.
iso_threshold : float
Threshold for isosurface.
interpolation : str
Interpolation mode used by vispy. Must be one of our supported modes.
Extended Summary
----------
_data_view : array (N, M), (N, M, 3), or (N, M, 4)
Image data for the currently viewed slice. Must be 2D image data, but
can be multidimensional for RGB or RGBA images if multidimensional is
`True`.
_colorbar : array
Colorbar for current colormap.
"""
_colormaps = AVAILABLE_COLORMAPS
_max_tile_shape = 1600
def __init__(
self,
data,
*,
rgb=None,
is_pyramid=None,
colormap='gray',
contrast_limits=None,
gamma=1,
interpolation='nearest',
rendering='mip',
iso_threshold=0.5,
name=None,
metadata=None,
scale=None,
translate=None,
opacity=1,
blending='translucent',
visible=True,
):
if isinstance(data, types.GeneratorType):
data = list(data)
ndim, rgb, is_pyramid, data_pyramid = get_pyramid_and_rgb(
data, pyramid=is_pyramid, rgb=rgb
)
super().__init__(
ndim,
name=name,
metadata=metadata,
scale=scale,
translate=translate,
opacity=opacity,
blending=blending,
visible=visible,
)
self.events.add(
contrast_limits=Event,
gamma=Event,
colormap=Event,
interpolation=Event,
rendering=Event,
iso_threshold=Event,
)
# Set data
self.is_pyramid = is_pyramid
self.rgb = rgb
self._data = data
self._data_pyramid = data_pyramid
self._top_left = np.zeros(ndim, dtype=int)
if self.is_pyramid:
self._data_level = len(data_pyramid) - 1
else:
self._data_level = 0
# Intitialize image views and thumbnails with zeros
if self.rgb:
self._data_view = np.zeros(
(1,) * self.dims.ndisplay + (self.shape[-1],)
)
else:
self._data_view = np.zeros((1,) * self.dims.ndisplay)
self._data_raw = self._data_view
self._data_thumbnail = self._data_view
# Set contrast_limits and colormaps
self._gamma = gamma
self._iso_threshold = iso_threshold
self._colormap_name = ''
self._contrast_limits_msg = ''
if contrast_limits is None:
if self.is_pyramid:
input_data = self._data_pyramid[-1]
else:
input_data = self.data
self._contrast_limits_range = calc_data_range(input_data)
else:
self._contrast_limits_range = contrast_limits
self._contrast_limits = copy(self._contrast_limits_range)
self.colormap = colormap
self.contrast_limits = self._contrast_limits
self.interpolation = interpolation
self.rendering = rendering
# Trigger generation of view slice and thumbnail
self._update_dims()
    @property
    def data(self):
        """array: Image data as provided to the layer."""
        return self._data
    @data.setter
    def data(self, data):
        # Re-derive the RGB / pyramid interpretation for the new data, using
        # the layer's current settings as the preference.
        ndim, rgb, is_pyramid, data_pyramid = get_pyramid_and_rgb(
            data, pyramid=self.is_pyramid, rgb=self.rgb
        )
        self.is_pyramid = is_pyramid
        self.rgb = rgb
        self._data = data
        self._data_pyramid = data_pyramid
        self._update_dims()
        # Notify listeners that the underlying data changed.
        self.events.data()
def _get_ndim(self):
"""Determine number of dimensions of the layer."""
return len(self.level_shapes[0])
def _get_extent(self):
return tuple((0, m) for m in self.level_shapes[0])
    @property
    def data_level(self):
        """int: Current level of pyramid, or 0 if image."""
        # Level 0 is the highest-resolution pyramid level.
        return self._data_level
@data_level.setter
def data_level(self, level):
if self._data_level == level:
return
self._data_level = level
self.refresh()
@property
def level_shapes(self):
"""array: Shapes of each level of the pyramid or just of image."""
if self.is_pyramid:
if self.rgb:
shapes = [im.shape[:-1] for im in self._data_pyramid]
else:
shapes = [im.shape for im in self._data_pyramid]
else:
if self.rgb:
shapes = [self.data.shape[:-1]]
else:
shapes = [self.data.shape]
return np.array(shapes)
    @property
    def level_downsamples(self):
        """list: Downsample factors for each level of the pyramid."""
        # Element-wise ratio of the full-resolution shape to each level's
        # shape, one row per level.
        return np.divide(self.level_shapes[0], self.level_shapes)
    @property
    def top_left(self):
        """tuple: Location of top left canvas pixel in image."""
        # Used by _set_view_slice to position the cropped tile of a large
        # pyramid level.
        return self._top_left
    @top_left.setter
    def top_left(self, top_left):
        # Skip the refresh when the position has not actually changed.
        if np.all(self._top_left == top_left):
            return
        self._top_left = top_left.astype(int)
        self.refresh()
    @property
    def colormap(self):
        """2-tuple of str, vispy.color.Colormap: colormap for luminance images.
        """
        # Returned as a (name, colormap-object) pair.
        return self._colormap_name, self._cmap
    @colormap.setter
    def colormap(self, colormap):
        # Accepts a name string, a (name, cmap) tuple, a {name: cmap} dict,
        # or a vispy Colormap instance; anything else warns and keeps the
        # current colormap.
        name = '[unnamed colormap]'
        if isinstance(colormap, str):
            name = colormap
        elif isinstance(colormap, tuple):
            name, cmap = colormap
            self._colormaps[name] = cmap
        elif isinstance(colormap, dict):
            self._colormaps.update(colormap)
            name = list(colormap)[0]  # first key in dict
        elif isinstance(colormap, vispy.color.Colormap):
            # Give the anonymous colormap a unique placeholder name.
            name = increment_unnamed_colormap(
                name, list(self._colormaps.keys())
            )
            self._colormaps[name] = colormap
        else:
            warnings.warn(f'invalid value for colormap: {colormap}')
            # Fall back to the currently selected colormap.
            name = self._colormap_name
        self._colormap_name = name
        self._cmap = self._colormaps[name]
        self._colorbar = make_colorbar(self._cmap)
        self._update_thumbnail()
        self.events.colormap()
    @property
    def colormaps(self):
        """tuple of str: names of available colormaps."""
        # Keys of the class-level colormap registry.
        return tuple(self._colormaps.keys())
    @property
    def contrast_limits(self):
        """list of float: Limits to use for the colormap."""
        # Returns a fresh list, so callers cannot mutate internal state.
        return list(self._contrast_limits)
    @contrast_limits.setter
    def contrast_limits(self, contrast_limits):
        # Human-readable '<low>, <high>' message shown via the status bar.
        self._contrast_limits_msg = (
            format_float(contrast_limits[0])
            + ', '
            + format_float(contrast_limits[1])
        )
        self.status = self._contrast_limits_msg
        self._contrast_limits = contrast_limits
        # Grow the allowed range so it always contains the new limits.
        if contrast_limits[0] < self._contrast_limits_range[0]:
            self._contrast_limits_range[0] = copy(contrast_limits[0])
        if contrast_limits[1] > self._contrast_limits_range[1]:
            self._contrast_limits_range[1] = copy(contrast_limits[1])
        self._update_thumbnail()
        self.events.contrast_limits()
    @property
    def gamma(self):
        """float: Gamma exponent applied to normalized intensities when
        rendering the thumbnail."""
        return self._gamma
    @gamma.setter
    def gamma(self, value):
        # Show the new value in the status bar, re-render the thumbnail,
        # and notify listeners.
        self.status = format_float(value)
        self._gamma = value
        self._update_thumbnail()
        self.events.gamma()
    @property
    def iso_threshold(self):
        """float: threshold for isosurface."""
        # Used by the 'iso' rendering mode (see `rendering`).
        return self._iso_threshold
    @iso_threshold.setter
    def iso_threshold(self, value):
        # Show the new value in the status bar, re-render the thumbnail,
        # and notify listeners.
        self.status = format_float(value)
        self._iso_threshold = value
        self._update_thumbnail()
        self.events.iso_threshold()
    @property
    def interpolation(self):
        """{
        'bessel', 'bicubic', 'bilinear', 'blackman', 'catrom', 'gaussian',
        'hamming', 'hanning', 'hermite', 'kaiser', 'lanczos', 'mitchell',
        'nearest', 'spline16', 'spline36'
        }: Name of the interpolation method in use.
        """
        return str(self._interpolation)
    @interpolation.setter
    def interpolation(self, interpolation):
        # Normalize string names to the Interpolation enum.
        if isinstance(interpolation, str):
            interpolation = Interpolation(interpolation)
        self._interpolation = interpolation
        self.events.interpolation()
    @property
    def rendering(self):
        """Rendering: Rendering mode.
        Selects a preset rendering mode in vispy that determines how
        volume is displayed
        * translucent: voxel colors are blended along the view ray until
          the result is opaque.
        * mip: maximum intensity projection. Cast a ray and display the
          maximum value that was encountered.
        * additive: voxel colors are added along the view ray until
          the result is saturated.
        * iso: isosurface. Cast a ray until a certain threshold is
          encountered. At that location, lighting calculations are
          performed to give the visual appearance of a surface.
        """
        return str(self._rendering)
    @rendering.setter
    def rendering(self, rendering):
        # Normalize string names to the Rendering enum.
        if isinstance(rendering, str):
            rendering = Rendering(rendering)
        self._rendering = rendering
        self.events.rendering()
def _raw_to_displayed(self, raw):
"""Determine displayed image from raw image.
For normal image layers, just return the actual image.
Parameters
-------
raw : array
Raw array.
Returns
-------
image : array
Displayed array.
"""
image = raw
return image
    def _set_view_slice(self):
        """Set the view given the indices to slice with."""
        not_disp = self.dims.not_displayed
        if self.rgb:
            # if rgb need to keep the final axis fixed during the
            # transpose. The index of the final axis depends on how many
            # axes are displayed.
            order = self.dims.displayed_order + (
                max(self.dims.displayed_order) + 1,
            )
        else:
            order = self.dims.displayed_order
        if self.is_pyramid:
            # If 3d rendering just show lowest level of pyramid
            if self.dims.ndisplay == 3:
                self.data_level = len(self._data_pyramid) - 1
            # Slice currently viewed level
            level = self.data_level
            indices = np.array(self.dims.indices)
            # Map full-resolution slice indices onto this level's grid and
            # clamp into its valid range.
            downsampled_indices = (
                indices[not_disp] / self.level_downsamples[level, not_disp]
            )
            downsampled_indices = np.round(
                downsampled_indices.astype(float)
            ).astype(int)
            downsampled_indices = np.clip(
                downsampled_indices, 0, self.level_shapes[level, not_disp] - 1
            )
            indices[not_disp] = downsampled_indices
            disp_shape = self.level_shapes[level, self.dims.displayed]
            scale = np.ones(self.ndim)
            for d in self.dims.displayed:
                scale[d] = self.level_downsamples[self.data_level][d]
            self._scale_view = scale
            if np.any(disp_shape > self._max_tile_shape):
                # Level too large to show at once: crop a tile of at most
                # _max_tile_shape pixels starting at the stored top-left.
                for d in self.dims.displayed:
                    indices[d] = slice(
                        self._top_left[d],
                        self._top_left[d] + self._max_tile_shape,
                        1,
                    )
                self._translate_view = (
                    self._top_left * self.scale * self._scale_view
                )
            else:
                self._translate_view = [0] * self.ndim
            image = np.asarray(
                self._data_pyramid[level][tuple(indices)]
            ).transpose(order)
            if level == len(self._data_pyramid) - 1:
                # Already viewing the lowest-resolution level; reuse it.
                thumbnail = image
            else:
                # Slice thumbnail
                indices = np.array(self.dims.indices)
                downsampled_indices = (
                    indices[not_disp] / self.level_downsamples[-1, not_disp]
                )
                downsampled_indices = np.round(
                    downsampled_indices.astype(float)
                ).astype(int)
                downsampled_indices = np.clip(
                    downsampled_indices, 0, self.level_shapes[-1, not_disp] - 1
                )
                indices[not_disp] = downsampled_indices
                thumbnail = np.asarray(
                    self._data_pyramid[-1][tuple(indices)]
                ).transpose(order)
        else:
            self._scale_view = np.ones(self.dims.ndim)
            image = np.asarray(self.data[self.dims.indices]).transpose(order)
            thumbnail = image
        if self.rgb and image.dtype.kind == 'f':
            # Float RGB(A) data is clipped into [0, 1] for display.
            self._data_raw = np.clip(image, 0, 1)
            self._data_view = self._raw_to_displayed(self._data_raw)
            self._data_thumbnail = self._raw_to_displayed(
                np.clip(thumbnail, 0, 1)
            )
        else:
            self._data_raw = image
            self._data_view = self._raw_to_displayed(self._data_raw)
            self._data_thumbnail = self._raw_to_displayed(thumbnail)
        if self.is_pyramid:
            self.events.scale()
            self.events.translate()
    def _update_thumbnail(self):
        """Update thumbnail with current image data and colormap."""
        # In 3D display, max-project down the first axis to get a 2D image.
        if self.dims.ndisplay == 3 and self.dims.ndim > 2:
            image = np.max(self._data_thumbnail, axis=0)
        else:
            image = self._data_thumbnail
        # float16 not supported by ndi.zoom
        dtype = np.dtype(image.dtype)
        if dtype in [np.dtype(np.float16)]:
            image = image.astype(np.float32)
        # Zoom so the image fits inside the thumbnail shape while keeping
        # its aspect ratio.
        raw_zoom_factor = np.divide(
            self._thumbnail_shape[:2], image.shape[:2]
        ).min()
        new_shape = np.clip(
            raw_zoom_factor * np.array(image.shape[:2]),
            1,  # smallest side should be 1 pixel wide
            self._thumbnail_shape[:2],
        )
        zoom_factor = tuple(new_shape / image.shape[:2])
        if self.rgb:
            # warning filter can be removed with scipy 1.4
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                downsampled = ndi.zoom(
                    image, zoom_factor + (1,), prefilter=False, order=0
                )
            if image.shape[2] == 4:  # image is RGBA
                # Scale the existing alpha channel by the layer opacity.
                colormapped = np.copy(downsampled)
                colormapped[..., 3] = downsampled[..., 3] * self.opacity
                if downsampled.dtype == np.uint8:
                    colormapped = colormapped.astype(np.uint8)
            else:  # image is RGB
                # Append an alpha channel derived from the layer opacity.
                if downsampled.dtype == np.uint8:
                    alpha = np.full(
                        downsampled.shape[:2] + (1,),
                        int(255 * self.opacity),
                        dtype=np.uint8,
                    )
                else:
                    alpha = np.full(downsampled.shape[:2] + (1,), self.opacity)
                colormapped = np.concatenate([downsampled, alpha], axis=2)
        else:
            # warning filter can be removed with scipy 1.4
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                downsampled = ndi.zoom(
                    image, zoom_factor, prefilter=False, order=0
                )
            # Normalize by the contrast limits, apply gamma, then colormap.
            low, high = self.contrast_limits
            downsampled = np.clip(downsampled, low, high)
            color_range = high - low
            if color_range != 0:
                downsampled = (downsampled - low) / color_range
            downsampled = downsampled ** self.gamma
            color_array = self.colormap[1][downsampled.ravel()]
            colormapped = color_array.rgba.reshape(downsampled.shape + (4,))
            colormapped[..., 3] *= self.opacity
        self.thumbnail = colormapped
    def _get_value(self):
        """Returns the data value under the current cursor coordinates.

        Returns
        -------
        value
            Value of the data at the coord, or None when the cursor lies
            outside the displayed data; for pyramids a
            ``(data_level, value)`` tuple.
        """
        coord = np.round(self.coordinates).astype(int)
        if self.rgb:
            # Ignore the trailing channel axis when bounds-checking.
            shape = self._data_raw.shape[:-1]
        else:
            shape = self._data_raw.shape
        if all(0 <= c < s for c, s in zip(coord[self.dims.displayed], shape)):
            value = self._data_raw[tuple(coord[self.dims.displayed])]
        else:
            # Cursor is outside the currently displayed data.
            value = None
        if self.is_pyramid:
            # Report which pyramid level the value came from.
            value = (self.data_level, value)
        return value
    def to_xml_list(self):
        """Generates a list with a single xml element that defines the
        currently viewed image as a png according to the svg specification.

        Returns
        -------
        xml : list of xml.etree.ElementTree.Element
            List of a single xml element specifying the currently viewed image
            as a png according to the svg specification.
        """
        if self.dims.ndisplay == 3:
            # Max-project along the first axis for a 2D snapshot.
            image = np.max(self._data_thumbnail, axis=0)
        else:
            image = self._data_thumbnail
        # Normalize intensities into [0, 1] using the contrast limits.
        image = np.clip(
            image, self.contrast_limits[0], self.contrast_limits[1]
        )
        image = image - self.contrast_limits[0]
        color_range = self.contrast_limits[1] - self.contrast_limits[0]
        if color_range != 0:
            image = image / color_range
        mapped_image = self.colormap[1][image.ravel()]
        # NOTE(review): uses `.RGBA` here but `.rgba` in _update_thumbnail —
        # presumably the integer variant is wanted for png encoding; confirm.
        mapped_image = mapped_image.RGBA.reshape(image.shape + (4,))
        # Encode the png and embed it as a base64 data URI.
        image_str = imwrite('<bytes>', mapped_image, format='png')
        image_str = "data:image/png;base64," + str(b64encode(image_str))[2:-1]
        props = {'xlink:href': image_str}
        width = str(self.shape[self.dims.displayed[1]])
        height = str(self.shape[self.dims.displayed[0]])
        opacity = str(self.opacity)
        xml = Element(
            'image', width=width, height=height, opacity=opacity, **props
        )
        return [xml]
| [
"numpy.ones",
"numpy.clip",
"numpy.round",
"numpy.full",
"warnings.simplefilter",
"numpy.copy",
"xml.etree.ElementTree.Element",
"scipy.ndimage.zoom",
"numpy.max",
"warnings.catch_warnings",
"numpy.divide",
"numpy.asarray",
"numpy.concatenate",
"numpy.all",
"numpy.dtype",
"numpy.zeros"... | [((6396, 6421), 'numpy.zeros', 'np.zeros', (['ndim'], {'dtype': 'int'}), '(ndim, dtype=int)\n', (6404, 6421), True, 'import numpy as np\n'), ((7456, 7489), 'copy.copy', 'copy', (['self._contrast_limits_range'], {}), '(self._contrast_limits_range)\n', (7460, 7489), False, 'from copy import copy\n'), ((9198, 9214), 'numpy.array', 'np.array', (['shapes'], {}), '(shapes)\n', (9206, 9214), True, 'import numpy as np\n'), ((9348, 9398), 'numpy.divide', 'np.divide', (['self.level_shapes[0]', 'self.level_shapes'], {}), '(self.level_shapes[0], self.level_shapes)\n', (9357, 9398), True, 'import numpy as np\n'), ((9600, 9634), 'numpy.all', 'np.all', (['(self._top_left == top_left)'], {}), '(self._top_left == top_left)\n', (9606, 9634), True, 'import numpy as np\n'), ((18407, 18428), 'numpy.dtype', 'np.dtype', (['image.dtype'], {}), '(image.dtype)\n', (18415, 18428), True, 'import numpy as np\n'), ((22018, 22082), 'numpy.clip', 'np.clip', (['image', 'self.contrast_limits[0]', 'self.contrast_limits[1]'], {}), '(image, self.contrast_limits[0], self.contrast_limits[1])\n', (22025, 22082), True, 'import numpy as np\n'), ((22438, 22484), 'imageio.imwrite', 'imwrite', (['"""<bytes>"""', 'mapped_image'], {'format': '"""png"""'}), "('<bytes>', mapped_image, format='png')\n", (22445, 22484), False, 'from imageio import imwrite\n'), ((22769, 22839), 'xml.etree.ElementTree.Element', 'Element', (['"""image"""'], {'width': 'width', 'height': 'height', 'opacity': 'opacity'}), "('image', width=width, height=height, opacity=opacity, **props)\n", (22776, 22839), False, 'from xml.etree.ElementTree import Element\n'), ((6662, 6717), 'numpy.zeros', 'np.zeros', (['((1,) * self.dims.ndisplay + (self.shape[-1],))'], {}), '((1,) * self.dims.ndisplay + (self.shape[-1],))\n', (6670, 6717), True, 'import numpy as np\n'), ((6792, 6827), 'numpy.zeros', 'np.zeros', (['((1,) * self.dims.ndisplay)'], {}), '((1,) * self.dims.ndisplay)\n', (6800, 6827), True, 'import numpy as np\n'), ((11584, 
11608), 'copy.copy', 'copy', (['contrast_limits[0]'], {}), '(contrast_limits[0])\n', (11588, 11608), False, 'from copy import copy\n'), ((11718, 11742), 'copy.copy', 'copy', (['contrast_limits[1]'], {}), '(contrast_limits[1])\n', (11722, 11742), False, 'from copy import copy\n'), ((15185, 15212), 'numpy.array', 'np.array', (['self.dims.indices'], {}), '(self.dims.indices)\n', (15193, 15212), True, 'import numpy as np\n'), ((15493, 15564), 'numpy.clip', 'np.clip', (['downsampled_indices', '(0)', '(self.level_shapes[level, not_disp] - 1)'], {}), '(downsampled_indices, 0, self.level_shapes[level, not_disp] - 1)\n', (15500, 15564), True, 'import numpy as np\n'), ((15739, 15757), 'numpy.ones', 'np.ones', (['self.ndim'], {}), '(self.ndim)\n', (15746, 15757), True, 'import numpy as np\n'), ((15923, 15964), 'numpy.any', 'np.any', (['(disp_shape > self._max_tile_shape)'], {}), '(disp_shape > self._max_tile_shape)\n', (15929, 15964), True, 'import numpy as np\n'), ((17372, 17395), 'numpy.ones', 'np.ones', (['self.dims.ndim'], {}), '(self.dims.ndim)\n', (17379, 17395), True, 'import numpy as np\n'), ((17583, 17603), 'numpy.clip', 'np.clip', (['image', '(0)', '(1)'], {}), '(image, 0, 1)\n', (17590, 17603), True, 'import numpy as np\n'), ((18254, 18290), 'numpy.max', 'np.max', (['self._data_thumbnail'], {'axis': '(0)'}), '(self._data_thumbnail, axis=0)\n', (18260, 18290), True, 'import numpy as np\n'), ((20288, 20319), 'numpy.clip', 'np.clip', (['downsampled', 'low', 'high'], {}), '(downsampled, low, high)\n', (20295, 20319), True, 'import numpy as np\n'), ((21910, 21946), 'numpy.max', 'np.max', (['self._data_thumbnail'], {'axis': '(0)'}), '(self._data_thumbnail, axis=0)\n', (21916, 21946), True, 'import numpy as np\n'), ((16697, 16724), 'numpy.array', 'np.array', (['self.dims.indices'], {}), '(self.dims.indices)\n', (16705, 16724), True, 'import numpy as np\n'), ((17030, 17098), 'numpy.clip', 'np.clip', (['downsampled_indices', '(0)', '(self.level_shapes[-1, not_disp] - 1)'], 
{}), '(downsampled_indices, 0, self.level_shapes[-1, not_disp] - 1)\n', (17037, 17098), True, 'import numpy as np\n'), ((17748, 17772), 'numpy.clip', 'np.clip', (['thumbnail', '(0)', '(1)'], {}), '(thumbnail, 0, 1)\n', (17755, 17772), True, 'import numpy as np\n'), ((18450, 18470), 'numpy.dtype', 'np.dtype', (['np.float16'], {}), '(np.float16)\n', (18458, 18470), True, 'import numpy as np\n'), ((18545, 18598), 'numpy.divide', 'np.divide', (['self._thumbnail_shape[:2]', 'image.shape[:2]'], {}), '(self._thumbnail_shape[:2], image.shape[:2])\n', (18554, 18598), True, 'import numpy as np\n'), ((18686, 18711), 'numpy.array', 'np.array', (['image.shape[:2]'], {}), '(image.shape[:2])\n', (18694, 18711), True, 'import numpy as np\n'), ((18971, 18996), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (18994, 18996), False, 'import warnings\n'), ((19014, 19045), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (19035, 19045), False, 'import warnings\n'), ((19076, 19137), 'scipy.ndimage.zoom', 'ndi.zoom', (['image', '(zoom_factor + (1,))'], {'prefilter': '(False)', 'order': '(0)'}), '(image, zoom_factor + (1,), prefilter=False, order=0)\n', (19084, 19137), True, 'from scipy import ndimage as ndi\n'), ((19259, 19279), 'numpy.copy', 'np.copy', (['downsampled'], {}), '(downsampled)\n', (19266, 19279), True, 'import numpy as np\n'), ((19884, 19928), 'numpy.concatenate', 'np.concatenate', (['[downsampled, alpha]'], {'axis': '(2)'}), '([downsampled, alpha], axis=2)\n', (19898, 19928), True, 'import numpy as np\n'), ((20019, 20044), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (20042, 20044), False, 'import warnings\n'), ((20062, 20093), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (20083, 20093), False, 'import warnings\n'), ((20124, 20178), 'scipy.ndimage.zoom', 'ndi.zoom', (['image', 'zoom_factor'], {'prefilter': '(False)', 'order': '(0)'}), 
'(image, zoom_factor, prefilter=False, order=0)\n', (20132, 20178), True, 'from scipy import ndimage as ndi\n'), ((20997, 21023), 'numpy.round', 'np.round', (['self.coordinates'], {}), '(self.coordinates)\n', (21005, 21023), True, 'import numpy as np\n'), ((17416, 17456), 'numpy.asarray', 'np.asarray', (['self.data[self.dims.indices]'], {}), '(self.data[self.dims.indices])\n', (17426, 17456), True, 'import numpy as np\n'), ((19802, 19853), 'numpy.full', 'np.full', (['(downsampled.shape[:2] + (1,))', 'self.opacity'], {}), '(downsampled.shape[:2] + (1,), self.opacity)\n', (19809, 19853), True, 'import numpy as np\n'), ((22536, 22556), 'base64.b64encode', 'b64encode', (['image_str'], {}), '(image_str)\n', (22545, 22556), False, 'from base64 import b64encode\n'), ((10561, 10617), 'warnings.warn', 'warnings.warn', (['f"""invalid value for colormap: {colormap}"""'], {}), "(f'invalid value for colormap: {colormap}')\n", (10574, 10617), False, 'import warnings\n')] |
# Baseline model for "SGCN:Sparse Graph Convolution Network for Pedestrian Trajectory Prediction"
# Source-code directly referred from SGCN at https://github.com/shuaishiliu/SGCN/tree/0ff25cedc04852803787196e83c0bb941d724fc2/utils.py
import os
import math
import torch
import numpy as np
from torch.utils.data import Dataset
from tqdm import tqdm
def anorm(p1, p2):
    """Inverse Euclidean distance between two 2-D points (0 if coincident)."""
    dx = p1[0] - p2[0]
    dy = p1[1] - p2[1]
    dist = math.sqrt(dx * dx + dy * dy)
    return 0 if dist == 0 else 1 / dist
def loc_pos(seq_):
    """Prepend a 1-based temporal position channel to ``seq_``.

    Parameters
    ----------
    seq_ : array, shape (obs_len, N, 2)

    Returns
    -------
    array, shape (obs_len, N, 3)
        Position index in channel 0, original coordinates after it.
    """
    obs_len, num_ped = seq_.shape[0], seq_.shape[1]
    positions = np.arange(1, obs_len + 1)[:, np.newaxis, np.newaxis]
    positions = np.repeat(positions, num_ped, axis=1)
    return np.concatenate((positions, seq_), axis=-1)
def seq_to_graph(seq_, seq_rel, pos_enc=False):
    """Convert trajectory tensors into a per-timestep node-feature tensor.

    Parameters
    ----------
    seq_ : array, shape (max_nodes, 2, seq_len)
        Absolute coordinates (used only for its dimensions).
    seq_rel : array, shape (max_nodes, 2, seq_len)
        Relative frame-to-frame displacements that become the node features.
    pos_enc : bool
        If True, prepend a temporal position channel via ``loc_pos``.

    Returns
    -------
    torch.Tensor, shape (seq_len, max_nodes, 2) or (seq_len, max_nodes, 3)
        Float tensor of node features per time step.
    """
    seq_ = seq_.squeeze()
    seq_rel = seq_rel.squeeze()
    seq_len = seq_.shape[2]
    max_nodes = seq_.shape[0]
    V = np.zeros((seq_len, max_nodes, 2))
    for s in range(seq_len):
        # Only the relative displacements feed the node features; the
        # original also sliced the absolute coordinates here each step,
        # which was dead work and has been removed.
        step_rel = seq_rel[:, :, s]
        for h in range(max_nodes):
            V[s, h, :] = step_rel[h]
    if pos_enc:
        V = loc_pos(V)
    return torch.from_numpy(V).type(torch.float)
def poly_fit(traj, traj_len, threshold):
    """Classify a trajectory as linear (0.0) or non-linear (1.0).

    A degree-2 polynomial is fit to the last ``traj_len`` samples of each
    coordinate; the trajectory counts as non-linear when the summed fit
    residuals reach ``threshold``.

    Parameters
    ----------
    traj : array, shape (2, traj_len)
    traj_len : int
        Number of trailing time steps to evaluate.
    threshold : float
        Minimum residual sum considered non-linear.

    Returns
    -------
    float
        1.0 -> non-linear, 0.0 -> linear.
    """
    t = np.linspace(0, traj_len - 1, traj_len)
    residuals = [
        np.polyfit(t, traj[axis, -traj_len:], 2, full=True)[1]
        for axis in (0, 1)
    ]
    return 1.0 if residuals[0] + residuals[1] >= threshold else 0.0
def read_file(_path, delim='\t'):
    """Load a delimited trajectory file into a float ndarray.

    ``delim`` may be a literal delimiter, or the aliases 'tab' / 'space'.
    Each line becomes one row of floats.
    """
    delim = {'tab': '\t', 'space': ' '}.get(delim, delim)
    rows = []
    with open(_path, 'r') as f:
        for line in f:
            rows.append([float(tok) for tok in line.strip().split(delim)])
    return np.asarray(rows)
class TrajectoryDataset(Dataset):
    """Dataloader for the Trajectory datasets"""
    def __init__(
        self, data_dir, obs_len=8, pred_len=8, skip=1, threshold=0.002,
        min_ped=1, delim='\t'):
        """
        Args:
        - data_dir: Directory containing dataset files in the format
        <frame_id> <ped_id> <x> <y>
        - obs_len: Number of time-steps in input trajectories
        - pred_len: Number of time-steps in output trajectories
        - skip: Number of frames to skip while making the dataset
        - threshold: Minimum error to be considered for non linear traj
        when using a linear predictor
        - min_ped: Minimum number of pedestrians that should be in a sequence
        - delim: Delimiter in the dataset files
        """
        super(TrajectoryDataset, self).__init__()
        self.max_peds_in_frame = 0
        self.data_dir = data_dir
        self.obs_len = obs_len
        self.pred_len = pred_len
        self.skip = skip
        self.seq_len = self.obs_len + self.pred_len
        self.delim = delim
        all_files = os.listdir(self.data_dir)
        all_files = [os.path.join(self.data_dir, _path) for _path in all_files]
        num_peds_in_seq = []
        seq_list = []
        seq_list_rel = []
        loss_mask_list = []
        non_linear_ped = []
        for path in all_files:
            data = read_file(path, delim)
            # Group the raw rows by frame id.
            frames = np.unique(data[:, 0]).tolist()
            frame_data = []
            for frame in frames:
                frame_data.append(data[frame == data[:, 0], :])
            num_sequences = int(
                math.ceil((len(frames) - self.seq_len + 1) / skip))
            for idx in range(0, num_sequences * self.skip + 1, skip):
                # Stack seq_len consecutive frames into one candidate window.
                curr_seq_data = np.concatenate(
                    frame_data[idx:idx + self.seq_len], axis=0)
                peds_in_curr_seq = np.unique(curr_seq_data[:, 1])
                self.max_peds_in_frame = max(self.max_peds_in_frame, len(peds_in_curr_seq))
                curr_seq_rel = np.zeros((len(peds_in_curr_seq), 2,
                                          self.seq_len))
                curr_seq = np.zeros((len(peds_in_curr_seq), 2, self.seq_len))
                curr_loss_mask = np.zeros((len(peds_in_curr_seq),
                                           self.seq_len))
                num_peds_considered = 0
                _non_linear_ped = []
                for _, ped_id in enumerate(peds_in_curr_seq):
                    curr_ped_seq = curr_seq_data[curr_seq_data[:, 1] ==
                                                 ped_id, :]
                    curr_ped_seq = np.around(curr_ped_seq, decimals=4)
                    pad_front = frames.index(curr_ped_seq[0, 0]) - idx
                    pad_end = frames.index(curr_ped_seq[-1, 0]) - idx + 1
                    # Keep only pedestrians present in every frame of the window.
                    if pad_end - pad_front != self.seq_len:
                        continue
                    # Transpose to (2, seq_len): x/y over time.
                    curr_ped_seq = np.transpose(curr_ped_seq[:, 2:])
                    curr_ped_seq = curr_ped_seq
                    # Make coordinates relative
                    rel_curr_ped_seq = np.zeros(curr_ped_seq.shape)
                    # ipdb.set_trace()
                    rel_curr_ped_seq[:, 1:] = \
                        curr_ped_seq[:, 1:] - curr_ped_seq[:, :-1]
                    # rel_curr_ped_seq[:, 1:] = \
                    #     curr_ped_seq[:, 1:] - np.reshape(curr_ped_seq[:, 0], (2,1))
                    _idx = num_peds_considered
                    curr_seq[_idx, :, pad_front:pad_end] = curr_ped_seq
                    curr_seq_rel[_idx, :, pad_front:pad_end] = rel_curr_ped_seq
                    # Linear vs Non-Linear Trajectory
                    _non_linear_ped.append(
                        poly_fit(curr_ped_seq, pred_len, threshold))
                    curr_loss_mask[_idx, pad_front:pad_end] = 1
                    num_peds_considered += 1
                # Keep the window only when it has more than min_ped walkers.
                if num_peds_considered > min_ped:
                    non_linear_ped += _non_linear_ped
                    num_peds_in_seq.append(num_peds_considered)
                    loss_mask_list.append(curr_loss_mask[:num_peds_considered])
                    seq_list.append(curr_seq[:num_peds_considered])
                    seq_list_rel.append(curr_seq_rel[:num_peds_considered])
        self.num_seq = len(seq_list)
        seq_list = np.concatenate(seq_list, axis=0)
        seq_list_rel = np.concatenate(seq_list_rel, axis=0)
        loss_mask_list = np.concatenate(loss_mask_list, axis=0)
        non_linear_ped = np.asarray(non_linear_ped)
        # Convert numpy -> Torch Tensor
        self.obs_traj = torch.from_numpy(
            seq_list[:, :, :self.obs_len]).type(torch.float)
        self.pred_traj = torch.from_numpy(
            seq_list[:, :, self.obs_len:]).type(torch.float)
        self.obs_traj_rel = torch.from_numpy(
            seq_list_rel[:, :, :self.obs_len]).type(torch.float)
        self.pred_traj_rel = torch.from_numpy(
            seq_list_rel[:, :, self.obs_len:]).type(torch.float)
        self.loss_mask = torch.from_numpy(loss_mask_list).type(torch.float)
        self.non_linear_ped = torch.from_numpy(non_linear_ped).type(torch.float)
        # seq_start_end holds (start, end) pedestrian index ranges, one per
        # sequence, into the concatenated trajectory tensors.
        cum_start_idx = [0] + np.cumsum(num_peds_in_seq).tolist()
        self.seq_start_end = [
            (start, end)
            for start, end in zip(cum_start_idx, cum_start_idx[1:])
        ]
        # Convert to Graphs
        self.v_obs = []
        self.v_pred = []
        print("Processing Data .....")
        pbar = tqdm(total=len(self.seq_start_end))
        for ss in range(len(self.seq_start_end)):
            pbar.update(1)
            start, end = self.seq_start_end[ss]
            v_= seq_to_graph(self.obs_traj[start:end, :], self.obs_traj_rel[start:end, :], True)
            self.v_obs.append(v_.clone())
            v_= seq_to_graph(self.pred_traj[start:end, :], self.pred_traj_rel[start:end, :], False)
            self.v_pred.append(v_.clone())
        pbar.close()
    def __len__(self):
        # Number of multi-pedestrian sequences extracted from the files.
        return self.num_seq
    def __getitem__(self, index):
        # Slice every per-pedestrian tensor by this sequence's index range.
        start, end = self.seq_start_end[index]
        out = [
            self.obs_traj[start:end, :], self.pred_traj[start:end, :],
            self.obs_traj_rel[start:end, :], self.pred_traj_rel[start:end, :],
            self.non_linear_ped[start:end], self.loss_mask[start:end, :],
            self.v_obs[index], self.v_pred[index]
        ]
        return out
| [
"math.sqrt",
"numpy.polyfit",
"numpy.asarray",
"numpy.unique",
"numpy.zeros",
"numpy.transpose",
"numpy.around",
"numpy.cumsum",
"numpy.arange",
"numpy.linspace",
"os.path.join",
"os.listdir",
"numpy.concatenate",
"torch.from_numpy"
] | [((379, 433), 'math.sqrt', 'math.sqrt', (['((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)'], {}), '((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)\n', (388, 433), False, 'import math\n'), ((609, 634), 'numpy.arange', 'np.arange', (['(1)', '(obs_len + 1)'], {}), '(1, obs_len + 1)\n', (618, 634), True, 'import numpy as np\n'), ((744, 784), 'numpy.concatenate', 'np.concatenate', (['(pos_seq, seq_)'], {'axis': '(-1)'}), '((pos_seq, seq_), axis=-1)\n', (758, 784), True, 'import numpy as np\n'), ((978, 1011), 'numpy.zeros', 'np.zeros', (['(seq_len, max_nodes, 2)'], {}), '((seq_len, max_nodes, 2))\n', (986, 1011), True, 'import numpy as np\n'), ((1546, 1584), 'numpy.linspace', 'np.linspace', (['(0)', '(traj_len - 1)', 'traj_len'], {}), '(0, traj_len - 1, traj_len)\n', (1557, 1584), True, 'import numpy as np\n'), ((2122, 2138), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (2132, 2138), True, 'import numpy as np\n'), ((1597, 1645), 'numpy.polyfit', 'np.polyfit', (['t', 'traj[0, -traj_len:]', '(2)'], {'full': '(True)'}), '(t, traj[0, -traj_len:], 2, full=True)\n', (1607, 1645), True, 'import numpy as np\n'), ((1661, 1709), 'numpy.polyfit', 'np.polyfit', (['t', 'traj[1, -traj_len:]', '(2)'], {'full': '(True)'}), '(t, traj[1, -traj_len:], 2, full=True)\n', (1671, 1709), True, 'import numpy as np\n'), ((3232, 3257), 'os.listdir', 'os.listdir', (['self.data_dir'], {}), '(self.data_dir)\n', (3242, 3257), False, 'import os\n'), ((6518, 6550), 'numpy.concatenate', 'np.concatenate', (['seq_list'], {'axis': '(0)'}), '(seq_list, axis=0)\n', (6532, 6550), True, 'import numpy as np\n'), ((6574, 6610), 'numpy.concatenate', 'np.concatenate', (['seq_list_rel'], {'axis': '(0)'}), '(seq_list_rel, axis=0)\n', (6588, 6610), True, 'import numpy as np\n'), ((6636, 6674), 'numpy.concatenate', 'np.concatenate', (['loss_mask_list'], {'axis': '(0)'}), '(loss_mask_list, axis=0)\n', (6650, 6674), True, 'import numpy as np\n'), ((6700, 6726), 'numpy.asarray', 'np.asarray', 
(['non_linear_ped'], {}), '(non_linear_ped)\n', (6710, 6726), True, 'import numpy as np\n'), ((1232, 1251), 'torch.from_numpy', 'torch.from_numpy', (['V'], {}), '(V)\n', (1248, 1251), False, 'import torch\n'), ((3279, 3313), 'os.path.join', 'os.path.join', (['self.data_dir', '_path'], {}), '(self.data_dir, _path)\n', (3291, 3313), False, 'import os\n'), ((3927, 3985), 'numpy.concatenate', 'np.concatenate', (['frame_data[idx:idx + self.seq_len]'], {'axis': '(0)'}), '(frame_data[idx:idx + self.seq_len], axis=0)\n', (3941, 3985), True, 'import numpy as np\n'), ((4042, 4072), 'numpy.unique', 'np.unique', (['curr_seq_data[:, 1]'], {}), '(curr_seq_data[:, 1])\n', (4051, 4072), True, 'import numpy as np\n'), ((6792, 6839), 'torch.from_numpy', 'torch.from_numpy', (['seq_list[:, :, :self.obs_len]'], {}), '(seq_list[:, :, :self.obs_len])\n', (6808, 6839), False, 'import torch\n'), ((6896, 6943), 'torch.from_numpy', 'torch.from_numpy', (['seq_list[:, :, self.obs_len:]'], {}), '(seq_list[:, :, self.obs_len:])\n', (6912, 6943), False, 'import torch\n'), ((7003, 7054), 'torch.from_numpy', 'torch.from_numpy', (['seq_list_rel[:, :, :self.obs_len]'], {}), '(seq_list_rel[:, :, :self.obs_len])\n', (7019, 7054), False, 'import torch\n'), ((7115, 7166), 'torch.from_numpy', 'torch.from_numpy', (['seq_list_rel[:, :, self.obs_len:]'], {}), '(seq_list_rel[:, :, self.obs_len:])\n', (7131, 7166), False, 'import torch\n'), ((7223, 7255), 'torch.from_numpy', 'torch.from_numpy', (['loss_mask_list'], {}), '(loss_mask_list)\n', (7239, 7255), False, 'import torch\n'), ((7304, 7336), 'torch.from_numpy', 'torch.from_numpy', (['non_linear_ped'], {}), '(non_linear_ped)\n', (7320, 7336), False, 'import torch\n'), ((3567, 3588), 'numpy.unique', 'np.unique', (['data[:, 0]'], {}), '(data[:, 0])\n', (3576, 3588), True, 'import numpy as np\n'), ((4796, 4831), 'numpy.around', 'np.around', (['curr_ped_seq'], {'decimals': '(4)'}), '(curr_ped_seq, decimals=4)\n', (4805, 4831), True, 'import numpy as np\n'), 
((5105, 5138), 'numpy.transpose', 'np.transpose', (['curr_ped_seq[:, 2:]'], {}), '(curr_ped_seq[:, 2:])\n', (5117, 5138), True, 'import numpy as np\n'), ((5274, 5302), 'numpy.zeros', 'np.zeros', (['curr_ped_seq.shape'], {}), '(curr_ped_seq.shape)\n', (5282, 5302), True, 'import numpy as np\n'), ((7385, 7411), 'numpy.cumsum', 'np.cumsum', (['num_peds_in_seq'], {}), '(num_peds_in_seq)\n', (7394, 7411), True, 'import numpy as np\n')] |
from socket import timeout
import serial
import serial.tools.list_ports
import struct
import threading
import time
import numpy as np
"""
------------------------------------- 数据包格式 -------------------------------------
字节数 数据 说明
1 0xFF 包头
1 0x 字节长度(数据部分) 0~254
1 0x 该字节用于表示数据类型
n ... data部分
1 0x 校验和,对数据部分累加取低八位
"""
class SendMsg():
    """Serial packet sender.

    Opens a serial port (auto-selected when only one exists, chosen
    interactively otherwise), starts a daemon thread that prints any
    received bytes, and frames outgoing payloads per the packet format
    documented above.
    """
    def __init__(self, baud) -> None:
        self.baud = baud
        self.start_time = time.time()
        # Pick the serial port: use it directly if exactly one is present,
        # ask the user when there are several, otherwise prompt to rescan.
        while True:
            port_list = list(serial.tools.list_ports.comports())
            if len(port_list) == 1:
                self.portx = port_list[0].device
                break
            elif len(port_list) > 1:
                print(port_list)
                # BUGFIX: the menu previously printed '1' for every entry
                # because temp_i was never incremented; enumerate gives each
                # port its real 1-based index matching the prompt below.
                for temp_i, item in enumerate(port_list, start=1):
                    print(temp_i, ' - ', item.device)
                self.portx = port_list[int(input('Please enter the num of the port: '))-1].device
                break
            else:
                # Prompt (Chinese): no serial port found, press Enter to rescan.
                input('未发现串口,请重新检测')
        self.time_out = 2
        self.serial = serial.Serial(self.portx, self.baud, timeout=self.time_out)
        print(self.portx, "Open!")
        # Background daemon thread that echoes anything received on the port.
        self.RecvThread_thread = threading.Thread(target=self.RecvThread, args=())
        self.RecvThread_thread.daemon = True
        self.RecvThread_thread.start()
    def RecvThread(self):
        """Poll the port every 10 ms and print any bytes that arrived."""
        while True:
            time.sleep(0.01)
            if self.serial.in_waiting:
                RecvData = self.serial.read(self.serial.in_waiting)
                if len(RecvData) > 0:
                    print(RecvData)
    def send(self, msg_type: int, data: list):
        """Frame ``data`` (ints in 0..255) and write the packet to the port.

        Layout: 0xFF header, payload length, message type, payload bytes,
        then a checksum (low 8 bits of the payload sum).
        """
        HEAD = b'\xff'
        length = len(data)
        check_sum = np.sum(data)
        # Checksum is the low byte of the payload sum.
        check_point = check_sum & 0xff
        send_data = HEAD
        send_data += struct.pack('=BB', length, msg_type)
        for item in data:
            send_data += struct.pack('=B', item)
        send_data += struct.pack('=B', check_point)
        print(send_data)
        self.serial.write(send_data)
if __name__ == "__main__":
c = SendMsg(115200)
while True:
c.send(1, [1,2,3])
time.sleep(1) | [
"serial.Serial",
"threading.Thread",
"numpy.sum",
"serial.tools.list_ports.comports",
"struct.pack",
"time.time",
"time.sleep"
] | [((524, 535), 'time.time', 'time.time', ([], {}), '()\n', (533, 535), False, 'import time\n'), ((1141, 1200), 'serial.Serial', 'serial.Serial', (['self.portx', 'self.baud'], {'timeout': 'self.time_out'}), '(self.portx, self.baud, timeout=self.time_out)\n', (1154, 1200), False, 'import serial\n'), ((1270, 1319), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.RecvThread', 'args': '()'}), '(target=self.RecvThread, args=())\n', (1286, 1319), False, 'import threading\n'), ((1788, 1800), 'numpy.sum', 'np.sum', (['data'], {}), '(data)\n', (1794, 1800), True, 'import numpy as np\n'), ((1887, 1923), 'struct.pack', 'struct.pack', (['"""=BB"""', 'length', 'msg_type'], {}), "('=BB', length, msg_type)\n", (1898, 1923), False, 'import struct\n'), ((2020, 2050), 'struct.pack', 'struct.pack', (['"""=B"""', 'check_point'], {}), "('=B', check_point)\n", (2031, 2050), False, 'import struct\n'), ((2217, 2230), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2227, 2230), False, 'import time\n'), ((1463, 1479), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (1473, 1479), False, 'import time\n'), ((1975, 1998), 'struct.pack', 'struct.pack', (['"""=B"""', 'item'], {}), "('=B', item)\n", (1986, 1998), False, 'import struct\n'), ((585, 619), 'serial.tools.list_ports.comports', 'serial.tools.list_ports.comports', ([], {}), '()\n', (617, 619), False, 'import serial\n')] |
import numpy
from numpy import savetxt
import matplotlib.pyplot as plt
import matplotlib
from io import BytesIO
import base64
from PIL import Image
### Generating X,Y coordinaltes to be used in plot
data = numpy.load('../Inbreastdata.npy')
print(type(data))
print(len(data))
print(data.shape)
print(data[0])
size= len(data)
print(size)
# for one file
# filename = "new-image"
# #Save as png
# img_name = filename +".png"
# matplotlib.image.imsave(img_name, data[0])
# print(filename + " was saved")
# for conversion of the files in .npy to .png
for i in range(size):
filename = "image"+str(i)
img_name = filename +".png"
matplotlib.image.imsave("images/"+img_name, data[i],cmap="gray")
print(filename + " was saved")
# for conversion of the files in .npy to plt images
for i in range(size):
X = numpy.linspace(i,10,30)
Y = X*X
### Generating The Plot
plt.plot(X,Y)
filename = "image_plot"+str(i)
img_name = filename +".png"
### Saving plot to disk in png format
plt.savefig("plots/"+filename+'.png')
# # savetxt('data.csv', data, delimiter=',')
# # numpy.savetxt('data.txt',data, delimiter=' ')
# X = numpy.linspace(0,10,30)
# Y = X*X
# ### Generating The Plot
# plt.plot(X,Y)
# ### Saving plot to disk in png format
# plt.savefig('plt.png')
### Rendering Plot in Html
figfile = BytesIO()
plt.savefig(figfile, format='png')
figfile.seek(0)
figdata_png = base64.b64encode(figfile.getvalue()).decode('ascii')
result = figdata_png | [
"io.BytesIO",
"numpy.load",
"matplotlib.pyplot.plot",
"matplotlib.image.imsave",
"numpy.linspace",
"matplotlib.pyplot.savefig"
] | [((210, 243), 'numpy.load', 'numpy.load', (['"""../Inbreastdata.npy"""'], {}), "('../Inbreastdata.npy')\n", (220, 243), False, 'import numpy\n'), ((1339, 1348), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (1346, 1348), False, 'from io import BytesIO\n'), ((1349, 1383), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figfile'], {'format': '"""png"""'}), "(figfile, format='png')\n", (1360, 1383), True, 'import matplotlib.pyplot as plt\n'), ((639, 706), 'matplotlib.image.imsave', 'matplotlib.image.imsave', (["('images/' + img_name)", 'data[i]'], {'cmap': '"""gray"""'}), "('images/' + img_name, data[i], cmap='gray')\n", (662, 706), False, 'import matplotlib\n'), ((822, 847), 'numpy.linspace', 'numpy.linspace', (['i', '(10)', '(30)'], {}), '(i, 10, 30)\n', (836, 847), False, 'import numpy\n'), ((890, 904), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y'], {}), '(X, Y)\n', (898, 904), True, 'import matplotlib.pyplot as plt\n'), ((1017, 1058), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('plots/' + filename + '.png')"], {}), "('plots/' + filename + '.png')\n", (1028, 1058), True, 'import matplotlib.pyplot as plt\n')] |
"""coding=utf-8
Copyright 2020 Huawei Technologies Co., Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
============================================================================
"""
import os
import numpy as np
import random
import argparse
import pickle
import tensorflow as tf
from driver.Config import Configurable
from handle_data import dataLoader, CreatVocab
# from handle_data.CreatVocab import *
from handle_data.train import train
from bert.pretrain import modeling, tokenization
if __name__ == '__main__':
random.seed(233)
np.random.seed(233)
tf.set_random_seed(233)
# parameters
parse = argparse.ArgumentParser()
parse.add_argument('--config_file', type=str, default='default.ini')
parse.add_argument('--thread', type=int, default=1)
parse.add_argument('--use_cuda', action='store_true', default=False)
parse.add_argument('-bert_config_file',
type=str,
default=os.path.join('chinese_L-12_H-768_A-12',
'bert_config.json'))
parse.add_argument('-vocab_file',
type=str,
default=os.path.join('chinese_L-12_H-768_A-12',
'vocab.txt'),
help='bert_vocab')
parse.add_argument(
'-max_seq_length',
type=int,
default=202,
help=
'The maximum total input sequence length after WordPiece tokenization.'
)
parse.add_argument(
'-warmup_proportion',
type=float,
default=0.1,
help='Proportion of training to perform linear learning rate warmup for '
'E.g., 0.1 = 10% of training.')
parse.add_argument('-do_lower_case',
type=bool,
default=True,
help='Whether to lower case the input text.')
args, extra_args = parse.parse_known_args()
config = Configurable(args.config_file, extra_args)
bert_config = modeling.BertConfig.from_json_file(args.bert_config_file)
if args.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(args.max_seq_length, bert_config.max_position_embeddings))
tokenizer = tokenization.FullTokenizer(vocab_file=args.vocab_file,
do_lower_case=args.do_lower_case)
if config.decode:
path = './data/test.txt'
dev_data, sentence_length = dataLoader.decoder_sentence(path)
with open(config.save_dirs + '/' + config.word_path, 'rb') as f:
src_vocab = pickle.load(f)
with open(config.save_dirs + '/' + config.label_path, 'rb') as f:
tgt_vocab = pickle.load(f)
train("", dev_data, (src_vocab, tgt_vocab), tgt_vocab.size, config,
bert_config, tokenizer)
else:
train_data, res = dataLoader.read_sentence(
"./data/train_hotel.txt", True)
sentence_length, src_dic, tgt_dic = res
dev_data, sentence_length = dataLoader.read_sentence(
"./data/dev_hotel.txt", False)
src_vocab, tgt_vocab = CreatVocab.create_vocabularies(
train_data, 20000, src_dic, tgt_dic)
print("src_vocab:", src_vocab.size)
print("tgt_vocab:", tgt_vocab.size)
with open(config.save_dirs + '/' + config.word_path, 'wb') as f:
pickle.dump(src_vocab, f)
print("save src_vocab successfully in " + config.save_dirs + '/' +
config.word_path)
with open(config.save_dirs + '/' + config.label_path, 'wb') as f:
pickle.dump(tgt_vocab, f)
print("save tgt_vocab successfully in " + config.save_dirs + '/' +
config.label_path)
train(train_data, dev_data, (src_vocab, tgt_vocab), tgt_vocab.size,
config, bert_config, tokenizer)
| [
"bert.pretrain.tokenization.FullTokenizer",
"pickle.dump",
"driver.Config.Configurable",
"numpy.random.seed",
"argparse.ArgumentParser",
"handle_data.dataLoader.read_sentence",
"bert.pretrain.modeling.BertConfig.from_json_file",
"tensorflow.set_random_seed",
"pickle.load",
"random.seed",
"handle... | [((1034, 1050), 'random.seed', 'random.seed', (['(233)'], {}), '(233)\n', (1045, 1050), False, 'import random\n'), ((1056, 1075), 'numpy.random.seed', 'np.random.seed', (['(233)'], {}), '(233)\n', (1070, 1075), True, 'import numpy as np\n'), ((1081, 1104), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(233)'], {}), '(233)\n', (1099, 1104), True, 'import tensorflow as tf\n'), ((1138, 1163), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1161, 1163), False, 'import argparse\n'), ((2505, 2547), 'driver.Config.Configurable', 'Configurable', (['args.config_file', 'extra_args'], {}), '(args.config_file, extra_args)\n', (2517, 2547), False, 'from driver.Config import Configurable\n'), ((2567, 2624), 'bert.pretrain.modeling.BertConfig.from_json_file', 'modeling.BertConfig.from_json_file', (['args.bert_config_file'], {}), '(args.bert_config_file)\n', (2601, 2624), False, 'from bert.pretrain import modeling, tokenization\n'), ((2941, 3034), 'bert.pretrain.tokenization.FullTokenizer', 'tokenization.FullTokenizer', ([], {'vocab_file': 'args.vocab_file', 'do_lower_case': 'args.do_lower_case'}), '(vocab_file=args.vocab_file, do_lower_case=args.\n do_lower_case)\n', (2967, 3034), False, 'from bert.pretrain import modeling, tokenization\n'), ((3170, 3203), 'handle_data.dataLoader.decoder_sentence', 'dataLoader.decoder_sentence', (['path'], {}), '(path)\n', (3197, 3203), False, 'from handle_data import dataLoader, CreatVocab\n'), ((3442, 3537), 'handle_data.train.train', 'train', (['""""""', 'dev_data', '(src_vocab, tgt_vocab)', 'tgt_vocab.size', 'config', 'bert_config', 'tokenizer'], {}), "('', dev_data, (src_vocab, tgt_vocab), tgt_vocab.size, config,\n bert_config, tokenizer)\n", (3447, 3537), False, 'from handle_data.train import train\n'), ((3587, 3643), 'handle_data.dataLoader.read_sentence', 'dataLoader.read_sentence', (['"""./data/train_hotel.txt"""', '(True)'], {}), "('./data/train_hotel.txt', True)\n", (3611, 3643), False, 
'from handle_data import dataLoader, CreatVocab\n'), ((3744, 3799), 'handle_data.dataLoader.read_sentence', 'dataLoader.read_sentence', (['"""./data/dev_hotel.txt"""', '(False)'], {}), "('./data/dev_hotel.txt', False)\n", (3768, 3799), False, 'from handle_data import dataLoader, CreatVocab\n'), ((3846, 3913), 'handle_data.CreatVocab.create_vocabularies', 'CreatVocab.create_vocabularies', (['train_data', '(20000)', 'src_dic', 'tgt_dic'], {}), '(train_data, 20000, src_dic, tgt_dic)\n', (3876, 3913), False, 'from handle_data import dataLoader, CreatVocab\n'), ((4473, 4576), 'handle_data.train.train', 'train', (['train_data', 'dev_data', '(src_vocab, tgt_vocab)', 'tgt_vocab.size', 'config', 'bert_config', 'tokenizer'], {}), '(train_data, dev_data, (src_vocab, tgt_vocab), tgt_vocab.size, config,\n bert_config, tokenizer)\n', (4478, 4576), False, 'from handle_data.train import train\n'), ((1482, 1541), 'os.path.join', 'os.path.join', (['"""chinese_L-12_H-768_A-12"""', '"""bert_config.json"""'], {}), "('chinese_L-12_H-768_A-12', 'bert_config.json')\n", (1494, 1541), False, 'import os\n'), ((1693, 1745), 'os.path.join', 'os.path.join', (['"""chinese_L-12_H-768_A-12"""', '"""vocab.txt"""'], {}), "('chinese_L-12_H-768_A-12', 'vocab.txt')\n", (1705, 1745), False, 'import os\n'), ((3303, 3317), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3314, 3317), False, 'import pickle\n'), ((3418, 3432), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3429, 3432), False, 'import pickle\n'), ((4105, 4130), 'pickle.dump', 'pickle.dump', (['src_vocab', 'f'], {}), '(src_vocab, f)\n', (4116, 4130), False, 'import pickle\n'), ((4328, 4353), 'pickle.dump', 'pickle.dump', (['tgt_vocab', 'f'], {}), '(tgt_vocab, f)\n', (4339, 4353), False, 'import pickle\n')] |
"""
This module contains code for interacting with hit graphs.
A Graph is a namedtuple of matrices X, Ri, Ro, y.
"""
from collections import namedtuple
import numpy as np
import torch
import matplotlib.pyplot as plt
import tqdm
# A Graph is a namedtuple of matrices (X, Ri, Ro, y)
# Graph = namedtuple('Graph', ['X', 'Ri', 'Ro', 'y'])
from sparse_tensor import SpTensor
Graph = namedtuple('Graph', ['X', 'spRi', 'spRo', 'y'])
def graph_to_sparse(graph):
Ri_rows, Ri_cols = graph.Ri.nonzero()
Ro_rows, Ro_cols = graph.Ro.nonzero()
return dict(X=graph.X, y=graph.y,
Ri_rows=Ri_rows, Ri_cols=Ri_cols,
Ro_rows=Ro_rows, Ro_cols=Ro_cols)
def sparse_to_graph(X, Ri_rows, Ri_cols, Ro_rows, Ro_cols, y, simmatched, dtype=np.float32):
n_nodes, n_edges = X.shape[0], Ri_rows.shape[0]
spRi_idxs = np.stack([Ri_rows.astype(np.int64), Ri_cols.astype(np.int64)])
# Ri_rows and Ri_cols have the same shape
spRi_vals = np.ones((Ri_rows.shape[0],), dtype=dtype)
spRi = (spRi_idxs,spRi_vals,n_nodes,n_edges)#SpTensor(spRi_idxs, spRi_vals, (n_nodes, n_edges))
spRo_idxs = np.stack([Ro_rows.astype(np.int64), Ro_cols.astype(np.int64)])
# Ro_rows and Ro_cols have the same shape
spRo_vals = np.ones((Ro_rows.shape[0],), dtype=dtype)
spRo = (spRo_idxs,spRo_vals,n_nodes,n_edges)#SpTensor(spRo_idxs, spRo_vals, (n_nodes, n_edges))
if y.dtype != np.uint8:
y = y.astype(np.uint8)
return Graph(X, spRi, spRo, y)
def save_graph(graph, filename):
"""Write a single graph to an NPZ file archive"""
np.savez(filename, **graph_to_sparse(graph))
def save_graphs(graphs, filenames):
for graph, filename in zip(graphs, filenames):
save_graph(graph, filename)
def load_graph(filename):
"""Reade a single graph NPZ"""
with np.load(filename) as f:
return sparse_to_graph(**dict(f.items()))
def load_graphs(filenames, graph_type=Graph):
return [load_graph(f, graph_type) for f in filenames]
#thanks Steve :-)
def draw_sample(X, Ri, Ro, y, out,
cmap='bwr_r',
skip_false_edges=True,
alpha_labels=False,
sim_list=None):
# Select the i/o node features for each segment
feats_o = X[Ro]
feats_i = X[Ri]
# Prepare the figure
fig, (ax0,ax1) = plt.subplots(1, 2, figsize=(20,12))
cmap = plt.get_cmap(cmap)
#if sim_list is None:
# Draw the hits (layer, x, y)
# ax0.scatter(X[:,0], X[:,2], c='k')
# ax1.scatter(X[:,1], X[:,2], c='k')
#else:
# ax0.scatter(X[:,0], X[:,2], c='k')
# ax1.scatter(X[:,1], X[:,2], c='k')
# ax0.scatter(X[sim_list,0], X[sim_list,2], c='b')
# ax1.scatter(X[sim_list,1], X[sim_list,2], c='b')
# Draw the segments
if out is not None:
t = tqdm.tqdm(range(out.shape[0]))
for j in t:
if y[j] and out[j]>0.5:
seg_args = dict(c='purple', alpha=0.2)
elif y[j] and out[j]<0.5:
seg_args = dict(c='blue', alpha=0.2)
elif out[j]>0.5:
seg_args = dict(c='red', alpha=0.2)
else:
continue #false edge
ax0.plot([feats_o[j,0], feats_i[j,0]],
[feats_o[j,2], feats_i[j,2]], '-', **seg_args)
ax1.plot([feats_o[j,1], feats_i[j,1]],
[feats_o[j,2], feats_i[j,2]], '-', **seg_args)
else:
t = tqdm.tqdm(range(y.shape[0]))
for j in t:
if y[j]:
seg_args = dict(c='b', alpha=0.4)
elif not skip_false_edges:
seg_args = dict(c='black', alpha=0.4)
else: continue
ax0.plot([feats_o[j,0], feats_i[j,0]],
[feats_o[j,2], feats_i[j,2]], '-', **seg_args)
ax1.plot([feats_o[j,1], feats_i[j,1]],
[feats_o[j,2], feats_i[j,2]], '-', **seg_args)
# Adjust axes
ax0.set_xlabel('$x$ [cm]')
ax1.set_xlabel('$y$ [cm]')
ax0.set_ylabel('$layer$ [arb]')
ax1.set_ylabel('$layer$ [arb]')
plt.tight_layout()
return fig; | [
"numpy.load",
"matplotlib.pyplot.get_cmap",
"numpy.ones",
"matplotlib.pyplot.subplots",
"collections.namedtuple",
"matplotlib.pyplot.tight_layout"
] | [((382, 429), 'collections.namedtuple', 'namedtuple', (['"""Graph"""', "['X', 'spRi', 'spRo', 'y']"], {}), "('Graph', ['X', 'spRi', 'spRo', 'y'])\n", (392, 429), False, 'from collections import namedtuple\n'), ((968, 1009), 'numpy.ones', 'np.ones', (['(Ri_rows.shape[0],)'], {'dtype': 'dtype'}), '((Ri_rows.shape[0],), dtype=dtype)\n', (975, 1009), True, 'import numpy as np\n'), ((1252, 1293), 'numpy.ones', 'np.ones', (['(Ro_rows.shape[0],)'], {'dtype': 'dtype'}), '((Ro_rows.shape[0],), dtype=dtype)\n', (1259, 1293), True, 'import numpy as np\n'), ((2346, 2382), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(20, 12)'}), '(1, 2, figsize=(20, 12))\n', (2358, 2382), True, 'import matplotlib.pyplot as plt\n'), ((2393, 2411), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (2405, 2411), True, 'import matplotlib.pyplot as plt\n'), ((4184, 4202), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4200, 4202), True, 'import matplotlib.pyplot as plt\n'), ((1825, 1842), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (1832, 1842), True, 'import numpy as np\n')] |
import sys
sys.path.append('../../')
import numpy as np
import time
from scipy import stats
from matplotlib import pyplot as plt
from gpsearch import GaussianInputs, KDE_Numba
from gpsearch.examples import Oscillator, Noise
from KDEpy import FFTKDE
import statsmodels.api as sm
def benchmark_gumbel(n_run=1):
for ii in range(1,5):
toc_scipy = 0.0
toc_numba = 0.0
toc_kdepy = 0.0
toc_stats = 0.0
for nn in range(n_run):
mu, beta = 0, 0.1
smpl = np.random.gumbel(mu, beta, int(10**(ii)))
x_d = np.linspace(np.min(smpl)-0.01*np.abs(np.min(smpl)),
np.max(smpl)+0.01*np.abs(np.max(smpl)), 10000)
weights = np.ones(smpl.shape)
bw = KDE_Numba(smpl, weights=weights).bw
tic = time.time()
pdf_scipy = stats.gaussian_kde(smpl, weights=weights)(x_d)
toc_scipy += time.time() - tic
tic = time.time()
pdf_numba = KDE_Numba(smpl, weights=weights)(x_d)
toc_numba += time.time() - tic
tic = time.time()
pdf_kdepy = FFTKDE(bw=bw).fit(smpl, weights)(x_d)
toc_kdepy += time.time() - tic
tic = time.time()
dens = sm.nonparametric.KDEUnivariate(smpl)
dens.fit(bw=bw, weights=weights, fft=False)
pdf_stats = dens.evaluate(x_d)
toc_stats += time.time() - tic
print(ii, toc_scipy/n_run, toc_numba/n_run, toc_kdepy/n_run, toc_stats/n_run)
def compare_pdf_Oscillator():
smpl = np.genfromtxt("map_samples2D.txt")
ndim = 2
tf = 25
nsteps = 1000
u_init = [0, 0]
noise = Noise([0, tf])
lam = noise.get_eigenvalues(ndim)
mean = np.zeros(ndim)
cov = np.diag(lam)
domain = [ [-a, a] for a in 6.0*np.sqrt(np.diag(cov)) ]
inputs = GaussianInputs(mean, cov, domain)
weights = inputs.pdf(smpl[:,0:-1])
x_d = np.linspace(-3,3,500)
#weights = weights/weights
pdf_scipy = stats.gaussian_kde(smpl[:,-1], weights=weights)
pdf_numba = KDE_Numba(smpl[:,-1], weights=weights)
pdf_kdepy = FFTKDE(bw=pdf_numba.bw).fit(smpl[:,-1], weights)
plt.semilogy(x_d, pdf_scipy(x_d), lw=3)
plt.semilogy(x_d, pdf_numba(x_d), '--')
plt.semilogy(x_d, pdf_kdepy(x_d), '--', lw=0.5)
plt.xlim(-3, 3)
plt.ylim(1e-8, 1e2)
plt.show()
if __name__ == "__main__":
#benchmark_gumbel(20)
compare_pdf_Oscillator()
| [
"sys.path.append",
"matplotlib.pyplot.xlim",
"statsmodels.api.nonparametric.KDEUnivariate",
"matplotlib.pyplot.show",
"gpsearch.GaussianInputs",
"matplotlib.pyplot.ylim",
"numpy.zeros",
"numpy.genfromtxt",
"scipy.stats.gaussian_kde",
"numpy.ones",
"time.time",
"gpsearch.KDE_Numba",
"numpy.mi... | [((11, 36), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (26, 36), False, 'import sys\n'), ((1582, 1616), 'numpy.genfromtxt', 'np.genfromtxt', (['"""map_samples2D.txt"""'], {}), "('map_samples2D.txt')\n", (1595, 1616), True, 'import numpy as np\n'), ((1692, 1706), 'gpsearch.examples.Noise', 'Noise', (['[0, tf]'], {}), '([0, tf])\n', (1697, 1706), False, 'from gpsearch.examples import Oscillator, Noise\n'), ((1756, 1770), 'numpy.zeros', 'np.zeros', (['ndim'], {}), '(ndim)\n', (1764, 1770), True, 'import numpy as np\n'), ((1781, 1793), 'numpy.diag', 'np.diag', (['lam'], {}), '(lam)\n', (1788, 1793), True, 'import numpy as np\n'), ((1867, 1900), 'gpsearch.GaussianInputs', 'GaussianInputs', (['mean', 'cov', 'domain'], {}), '(mean, cov, domain)\n', (1881, 1900), False, 'from gpsearch import GaussianInputs, KDE_Numba\n'), ((1950, 1973), 'numpy.linspace', 'np.linspace', (['(-3)', '(3)', '(500)'], {}), '(-3, 3, 500)\n', (1961, 1973), True, 'import numpy as np\n'), ((2020, 2068), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['smpl[:, -1]'], {'weights': 'weights'}), '(smpl[:, -1], weights=weights)\n', (2038, 2068), False, 'from scipy import stats\n'), ((2084, 2123), 'gpsearch.KDE_Numba', 'KDE_Numba', (['smpl[:, -1]'], {'weights': 'weights'}), '(smpl[:, -1], weights=weights)\n', (2093, 2123), False, 'from gpsearch import GaussianInputs, KDE_Numba\n'), ((2334, 2349), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-3)', '(3)'], {}), '(-3, 3)\n', (2342, 2349), True, 'from matplotlib import pyplot as plt\n'), ((2354, 2376), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(1e-08)', '(100.0)'], {}), '(1e-08, 100.0)\n', (2362, 2376), True, 'from matplotlib import pyplot as plt\n'), ((2378, 2388), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2386, 2388), True, 'from matplotlib import pyplot as plt\n'), ((731, 750), 'numpy.ones', 'np.ones', (['smpl.shape'], {}), '(smpl.shape)\n', (738, 750), True, 'import numpy as np\n'), ((823, 
834), 'time.time', 'time.time', ([], {}), '()\n', (832, 834), False, 'import time\n'), ((968, 979), 'time.time', 'time.time', ([], {}), '()\n', (977, 979), False, 'import time\n'), ((1104, 1115), 'time.time', 'time.time', ([], {}), '()\n', (1113, 1115), False, 'import time\n'), ((1240, 1251), 'time.time', 'time.time', ([], {}), '()\n', (1249, 1251), False, 'import time\n'), ((1271, 1307), 'statsmodels.api.nonparametric.KDEUnivariate', 'sm.nonparametric.KDEUnivariate', (['smpl'], {}), '(smpl)\n', (1301, 1307), True, 'import statsmodels.api as sm\n'), ((2140, 2163), 'KDEpy.FFTKDE', 'FFTKDE', ([], {'bw': 'pdf_numba.bw'}), '(bw=pdf_numba.bw)\n', (2146, 2163), False, 'from KDEpy import FFTKDE\n'), ((768, 800), 'gpsearch.KDE_Numba', 'KDE_Numba', (['smpl'], {'weights': 'weights'}), '(smpl, weights=weights)\n', (777, 800), False, 'from gpsearch import GaussianInputs, KDE_Numba\n'), ((859, 900), 'scipy.stats.gaussian_kde', 'stats.gaussian_kde', (['smpl'], {'weights': 'weights'}), '(smpl, weights=weights)\n', (877, 900), False, 'from scipy import stats\n'), ((931, 942), 'time.time', 'time.time', ([], {}), '()\n', (940, 942), False, 'import time\n'), ((1004, 1036), 'gpsearch.KDE_Numba', 'KDE_Numba', (['smpl'], {'weights': 'weights'}), '(smpl, weights=weights)\n', (1013, 1036), False, 'from gpsearch import GaussianInputs, KDE_Numba\n'), ((1067, 1078), 'time.time', 'time.time', ([], {}), '()\n', (1076, 1078), False, 'import time\n'), ((1203, 1214), 'time.time', 'time.time', ([], {}), '()\n', (1212, 1214), False, 'import time\n'), ((1432, 1443), 'time.time', 'time.time', ([], {}), '()\n', (1441, 1443), False, 'import time\n'), ((592, 604), 'numpy.min', 'np.min', (['smpl'], {}), '(smpl)\n', (598, 604), True, 'import numpy as np\n'), ((662, 674), 'numpy.max', 'np.max', (['smpl'], {}), '(smpl)\n', (668, 674), True, 'import numpy as np\n'), ((1838, 1850), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (1845, 1850), True, 'import numpy as np\n'), ((1140, 1153), 'KDEpy.FFTKDE', 
'FFTKDE', ([], {'bw': 'bw'}), '(bw=bw)\n', (1146, 1153), False, 'from KDEpy import FFTKDE\n'), ((617, 629), 'numpy.min', 'np.min', (['smpl'], {}), '(smpl)\n', (623, 629), True, 'import numpy as np\n'), ((687, 699), 'numpy.max', 'np.max', (['smpl'], {}), '(smpl)\n', (693, 699), True, 'import numpy as np\n')] |
r"""
.. _disk-spatial-model:
Disk Spatial Model
==================
This is a spatial model parametrising a disk.
By default, the model is symmetric, i.e. a disk:
.. math::
\phi(lon, lat) = \frac{1}{2 \pi (1 - \cos{r_0}) } \cdot
\begin{cases}
1 & \text{for } \theta \leq r_0 \
0 & \text{for } \theta > r_0
\end{cases}
where :math:`\theta` is the sky separation. To improve fit convergence of the
model, the sharp edges is smoothed using `~scipy.special.erf`.
In case an eccentricity (`e`) and rotation angle (:math:`\phi`) are passed,
then the model is an elongated disk (i.e. an ellipse), with a major semiaxis of length :math:`r_0`
and position angle :math:`\phi` (increaing counter-clockwise from the North direction).
The model is defined on the celestial sphere, with a normalization defined by:
.. math::
\int_{4\pi}\phi(\text{lon}, \text{lat}) \,d\Omega = 1\,.
"""
# %%
# Example plot
# ------------
# Here is an example plot of the model:
import numpy as np
from astropy.coordinates import Angle
from gammapy.modeling.models import (
DiskSpatialModel,
Models,
PowerLawSpectralModel,
SkyModel,
)
phi = Angle("30 deg")
model = DiskSpatialModel(
lon_0="2 deg", lat_0="2 deg", r_0="1 deg", e=0.8, phi="30 deg", frame="galactic",
)
ax = model.plot(add_cbar=True)
# illustrate size parameter
region = model.to_region().to_pixel(ax.wcs)
artist = region.as_artist(facecolor="none", edgecolor="red")
ax.add_artist(artist)
transform = ax.get_transform("galactic")
ax.scatter(2, 2, transform=transform, s=20, edgecolor="red", facecolor="red")
ax.text(1.7, 1.85, r"$(l_0, b_0)$", transform=transform, ha="center")
ax.plot([2, 2 + np.sin(phi)], [2, 2 + np.cos(phi)], color="r", transform=transform)
ax.vlines(x=2, color="r", linestyle="--", transform=transform, ymin=0, ymax=5)
ax.text(2.15, 2.3, r"$\phi$", transform=transform)
# %%
# This plot illustrates the definition of the edge parameter:
import matplotlib.pyplot as plt
from astropy import units as u
from gammapy.modeling.models import DiskSpatialModel
import numpy as np
lons = np.linspace(0, 0.3, 500) * u.deg
r_0, edge = 0.2 * u.deg, 0.1 * u.deg
disk = DiskSpatialModel(lon_0="0 deg", lat_0="0 deg", r_0=r_0, edge=edge)
profile = disk(lons, 0 * u.deg)
plt.plot(lons, profile / profile.max(), alpha=0.5)
plt.xlabel("Radius (deg)")
plt.ylabel("Profile (A.U.)")
edge_min, edge_max = (r_0 - edge / 2.).value, (r_0 + edge / 2.).value
plt.vlines([edge_min, edge_max], 0, 1, linestyles=["--"], color="k")
plt.annotate("", xy=(edge_min, 0.5), xytext=(edge_min + edge.value, 0.5),
arrowprops=dict(arrowstyle="<->", lw=2))
plt.text(0.2, 0.53, "Edge width", ha="center", size=12)
plt.hlines([0.95], edge_min - 0.02, edge_min + 0.02, linestyles=["-"], color="k")
plt.text(edge_min + 0.02, 0.95, "95%", size=12, va="center")
plt.hlines([0.05], edge_max - 0.02, edge_max + 0.02, linestyles=["-"], color="k")
plt.text(edge_max - 0.02, 0.05, "5%", size=12, va="center", ha="right")
plt.show()
# %%
# YAML representation
# -------------------
# Here is an example YAML file using the model:
pwl = PowerLawSpectralModel()
gauss = DiskSpatialModel()
model = SkyModel(spectral_model=pwl, spatial_model=gauss, name="pwl-disk-model")
models = Models([model])
print(models.to_yaml())
| [
"gammapy.modeling.models.Models",
"matplotlib.pyplot.show",
"matplotlib.pyplot.vlines",
"matplotlib.pyplot.text",
"gammapy.modeling.models.DiskSpatialModel",
"gammapy.modeling.models.PowerLawSpectralModel",
"numpy.sin",
"numpy.linspace",
"numpy.cos",
"matplotlib.pyplot.ylabel",
"astropy.coordina... | [((1202, 1217), 'astropy.coordinates.Angle', 'Angle', (['"""30 deg"""'], {}), "('30 deg')\n", (1207, 1217), False, 'from astropy.coordinates import Angle\n'), ((1226, 1329), 'gammapy.modeling.models.DiskSpatialModel', 'DiskSpatialModel', ([], {'lon_0': '"""2 deg"""', 'lat_0': '"""2 deg"""', 'r_0': '"""1 deg"""', 'e': '(0.8)', 'phi': '"""30 deg"""', 'frame': '"""galactic"""'}), "(lon_0='2 deg', lat_0='2 deg', r_0='1 deg', e=0.8, phi=\n '30 deg', frame='galactic')\n", (1242, 1329), False, 'from gammapy.modeling.models import DiskSpatialModel\n'), ((2216, 2282), 'gammapy.modeling.models.DiskSpatialModel', 'DiskSpatialModel', ([], {'lon_0': '"""0 deg"""', 'lat_0': '"""0 deg"""', 'r_0': 'r_0', 'edge': 'edge'}), "(lon_0='0 deg', lat_0='0 deg', r_0=r_0, edge=edge)\n", (2232, 2282), False, 'from gammapy.modeling.models import DiskSpatialModel\n'), ((2367, 2393), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Radius (deg)"""'], {}), "('Radius (deg)')\n", (2377, 2393), True, 'import matplotlib.pyplot as plt\n'), ((2394, 2422), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Profile (A.U.)"""'], {}), "('Profile (A.U.)')\n", (2404, 2422), True, 'import matplotlib.pyplot as plt\n'), ((2494, 2562), 'matplotlib.pyplot.vlines', 'plt.vlines', (['[edge_min, edge_max]', '(0)', '(1)'], {'linestyles': "['--']", 'color': '"""k"""'}), "([edge_min, edge_max], 0, 1, linestyles=['--'], color='k')\n", (2504, 2562), True, 'import matplotlib.pyplot as plt\n'), ((2691, 2746), 'matplotlib.pyplot.text', 'plt.text', (['(0.2)', '(0.53)', '"""Edge width"""'], {'ha': '"""center"""', 'size': '(12)'}), "(0.2, 0.53, 'Edge width', ha='center', size=12)\n", (2699, 2746), True, 'import matplotlib.pyplot as plt\n'), ((2747, 2832), 'matplotlib.pyplot.hlines', 'plt.hlines', (['[0.95]', '(edge_min - 0.02)', '(edge_min + 0.02)'], {'linestyles': "['-']", 'color': '"""k"""'}), "([0.95], edge_min - 0.02, edge_min + 0.02, linestyles=['-'],\n color='k')\n", (2757, 2832), True, 'import 
matplotlib.pyplot as plt\n'), ((2829, 2889), 'matplotlib.pyplot.text', 'plt.text', (['(edge_min + 0.02)', '(0.95)', '"""95%"""'], {'size': '(12)', 'va': '"""center"""'}), "(edge_min + 0.02, 0.95, '95%', size=12, va='center')\n", (2837, 2889), True, 'import matplotlib.pyplot as plt\n'), ((2890, 2975), 'matplotlib.pyplot.hlines', 'plt.hlines', (['[0.05]', '(edge_max - 0.02)', '(edge_max + 0.02)'], {'linestyles': "['-']", 'color': '"""k"""'}), "([0.05], edge_max - 0.02, edge_max + 0.02, linestyles=['-'],\n color='k')\n", (2900, 2975), True, 'import matplotlib.pyplot as plt\n'), ((2972, 3043), 'matplotlib.pyplot.text', 'plt.text', (['(edge_max - 0.02)', '(0.05)', '"""5%"""'], {'size': '(12)', 'va': '"""center"""', 'ha': '"""right"""'}), "(edge_max - 0.02, 0.05, '5%', size=12, va='center', ha='right')\n", (2980, 3043), True, 'import matplotlib.pyplot as plt\n'), ((3044, 3054), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3052, 3054), True, 'import matplotlib.pyplot as plt\n'), ((3160, 3183), 'gammapy.modeling.models.PowerLawSpectralModel', 'PowerLawSpectralModel', ([], {}), '()\n', (3181, 3183), False, 'from gammapy.modeling.models import DiskSpatialModel, Models, PowerLawSpectralModel, SkyModel\n'), ((3192, 3210), 'gammapy.modeling.models.DiskSpatialModel', 'DiskSpatialModel', ([], {}), '()\n', (3208, 3210), False, 'from gammapy.modeling.models import DiskSpatialModel\n'), ((3220, 3292), 'gammapy.modeling.models.SkyModel', 'SkyModel', ([], {'spectral_model': 'pwl', 'spatial_model': 'gauss', 'name': '"""pwl-disk-model"""'}), "(spectral_model=pwl, spatial_model=gauss, name='pwl-disk-model')\n", (3228, 3292), False, 'from gammapy.modeling.models import DiskSpatialModel, Models, PowerLawSpectralModel, SkyModel\n'), ((3302, 3317), 'gammapy.modeling.models.Models', 'Models', (['[model]'], {}), '([model])\n', (3308, 3317), False, 'from gammapy.modeling.models import DiskSpatialModel, Models, PowerLawSpectralModel, SkyModel\n'), ((2137, 2161), 'numpy.linspace', 
'np.linspace', (['(0)', '(0.3)', '(500)'], {}), '(0, 0.3, 500)\n', (2148, 2161), True, 'import numpy as np\n'), ((1726, 1737), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (1732, 1737), True, 'import numpy as np\n'), ((1748, 1759), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (1754, 1759), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import os
import numpy as np
puzzle = [
[5, 3, 0, 0, 7, 0, 0, 0, 0],
[6, 0, 0, 1, 9, 5, 0, 0, 0],
[0, 9, 8, 0, 0, 0, 0, 6, 0],
[8, 0, 0, 0, 6, 0, 0, 0, 3],
[4, 0, 0, 8, 0, 3, 0, 0, 1],
[7, 0, 0, 0, 2, 0, 0, 0, 6],
[0, 6, 0, 0, 0, 0, 2, 8, 0],
[0, 0, 0, 4, 1, 9, 0, 0, 5],
[0, 0, 0, 0, 8, 0, 0, 7, 9],
]
if __name__ == "__main__":
p = np.array(puzzle)
puzzle_groups = []
for split_row_thirds in np.vsplit(p, 3):
for split_col_thirds in np.hsplit(split_row_thirds, 3):
puzzle_groups.append(split_col_thirds.flatten())
print(puzzle_groups)
"""
[array([5, 3, 0, 6, 0, 0, 0, 9, 8]),
array([0, 7, 0, 1, 9, 5, 0, 0, 0]),
array([0, 0, 0, 0, 0, 0, 0, 6, 0]),
array([8, 0, 0, 4, 0, 0, 7, 0, 0]),
array([0, 6, 0, 8, 0, 3, 0, 2, 0]),
array([0, 0, 3, 0, 0, 1, 0, 0, 6]),
array([0, 6, 0, 0, 0, 0, 0, 0, 0]),
array([0, 0, 0, 4, 1, 9, 0, 8, 0]),
array([2, 8, 0, 0, 0, 5, 0, 7, 9])]
"""
| [
"numpy.vsplit",
"numpy.array",
"numpy.hsplit"
] | [((399, 415), 'numpy.array', 'np.array', (['puzzle'], {}), '(puzzle)\n', (407, 415), True, 'import numpy as np\n'), ((468, 483), 'numpy.vsplit', 'np.vsplit', (['p', '(3)'], {}), '(p, 3)\n', (477, 483), True, 'import numpy as np\n'), ((517, 547), 'numpy.hsplit', 'np.hsplit', (['split_row_thirds', '(3)'], {}), '(split_row_thirds, 3)\n', (526, 547), True, 'import numpy as np\n')] |
from __future__ import print_function
import unittest
from nose.tools import assert_equal, assert_raises
import numpy.testing as np_test
from numpy.testing import assert_almost_equal
from matplotlib.transforms import Affine2D, BlendedGenericTransform
from matplotlib.path import Path
from matplotlib.scale import LogScale
from matplotlib.testing.decorators import cleanup, image_comparison
import numpy as np
import matplotlib.transforms as mtrans
import matplotlib.pyplot as plt
import matplotlib.path as mpath
import matplotlib.patches as mpatches
@cleanup
def test_non_affine_caching():
    # Regression test: redrawing a figure must reuse the cached result of
    # the (expensive) non-affine part of a transform when only affine
    # parts were invalidated.
    class AssertingNonAffineTransform(mtrans.Transform):
        """
        This transform raises an assertion error when called when it
        shouldn't be and self.raise_on_transform is True.
        """
        input_dims = output_dims = 2
        is_affine = False
        def __init__(self, *args, **kwargs):
            mtrans.Transform.__init__(self, *args, **kwargs)
            self.raise_on_transform = False
            # The actual work is delegated to a plain affine scale; the
            # class merely *claims* to be non-affine so the caching code
            # path is exercised.
            self.underlying_transform = mtrans.Affine2D().scale(10, 10)
        def transform_path_non_affine(self, path):
            if self.raise_on_transform:
                assert False, ('Invalidated affine part of transform '
                               'unnecessarily.')
            return self.underlying_transform.transform_path(path)
        transform_path = transform_path_non_affine
        def transform_non_affine(self, path):
            if self.raise_on_transform:
                assert False, ('Invalidated affine part of transform '
                               'unnecessarily.')
            return self.underlying_transform.transform(path)
        transform = transform_non_affine
    my_trans = AssertingNonAffineTransform()
    ax = plt.axes()
    plt.plot(range(10), transform=my_trans + ax.transData)
    plt.draw()
    # Arm the transform so it raises if its non-affine transform method is
    # triggered again, then invalidate only an affine node and redraw.
    my_trans.raise_on_transform = True
    ax.transAxes.invalidate()
    plt.draw()
@cleanup
def test_external_transform_api():
    """Objects exposing ``_as_mpl_transform`` are usable as transforms."""
    class ScaledBy(object):
        """Duck-typed transform: scales data coords by a fixed factor."""
        def __init__(self, scale_factor):
            self._scale_factor = scale_factor
        def _as_mpl_transform(self, axes):
            scale = mtrans.Affine2D().scale(self._scale_factor)
            return scale + axes.transData
    ax = plt.axes()
    lines = plt.plot(range(10), transform=ScaledBy(10))
    line = lines[0]
    ax.set_xlim(0, 100)
    ax.set_ylim(0, 100)
    # The top of the line's transform stack must be the x10 scale.
    expected = mtrans.Affine2D().scale(10).get_matrix()
    actual = line.get_transform()._a.get_matrix()
    np.testing.assert_allclose(actual, expected)
@image_comparison(baseline_images=['pre_transform_data'])
def test_pre_transform_plotting():
    # a catch-all for as many as possible plot layouts which handle
    # pre-transforming the data
    # NOTE: The axis range is important in this plot. It should be x10
    # what the data suggests it should be
    ax = plt.axes()
    # Every artist below is drawn through this x10 scale prepended to
    # transData, so the plotted extents are ten times the raw data.
    times10 = mtrans.Affine2D().scale(10)
    ax.contourf(np.arange(48).reshape(6, 8), transform=times10 + ax.transData)
    ax.pcolormesh(np.linspace(0, 4, 7),
                  np.linspace(5.5, 8, 9),
                  np.arange(48).reshape(6, 8),
                  transform=times10 + ax.transData)
    ax.scatter(np.linspace(0, 10), np.linspace(10, 0),
               transform=times10 + ax.transData)
    # Vector-field style plots share the same pre-transform.
    x = np.linspace(8, 10, 20)
    y = np.linspace(1, 5, 20)
    u = 2*np.sin(x) + np.cos(y[:, np.newaxis])
    v = np.sin(x) - np.cos(y[:, np.newaxis])
    ax.streamplot(x, y, u, v, transform=times10 + ax.transData,
                  density=(1, 1), linewidth=u**2 + v**2)
    # reduce the vector data down a bit for barb and quiver plotting
    x, y = x[::3], y[::3]
    u, v = u[::3, ::3], v[::3, ::3]
    ax.quiver(x, y + 5, u, v, transform=times10 + ax.transData)
    ax.barbs(x - 3, y + 5, u**2, v**2, transform=times10 + ax.transData)
def test_Affine2D_from_values():
    """Each scalar of from_values(a, b, c, d, e, f) fills one matrix slot.

    The affine matrix is ``[[a, c, e], [b, d, f], [0, 0, 1]]``; setting a
    single value at a time and transforming fixed points verifies every
    slot independently.
    """
    points = np.array([[0, 0],
                       [10, 20],
                       [-1, 0],
                       ])
    # (arguments for from_values, expected transformed points);
    # replaces six near-identical copy-pasted stanzas with one loop.
    cases = [
        ((1, 0, 0, 0, 0, 0), [[0, 0], [10, 0], [-1, 0]]),  # a: x <- a*x
        ((0, 2, 0, 0, 0, 0), [[0, 0], [0, 20], [0, -2]]),  # b: y <- b*x
        ((0, 0, 3, 0, 0, 0), [[0, 0], [60, 0], [0, 0]]),   # c: x <- c*y
        ((0, 0, 0, 4, 0, 0), [[0, 0], [0, 80], [0, 0]]),   # d: y <- d*y
        ((0, 0, 0, 0, 5, 0), [[5, 0], [5, 0], [5, 0]]),    # e: x translation
        ((0, 0, 0, 0, 0, 6), [[0, 6], [0, 6], [0, 6]]),    # f: y translation
    ]
    for values, expected in cases:
        t = mtrans.Affine2D.from_values(*values)
        actual = t.transform(points)
        assert_almost_equal(actual, np.array(expected))
def test_clipping_of_log():
    """Regression test for issue 804: clipping a log-transformed path."""
    move, line, close = Path.MOVETO, Path.LINETO, Path.CLOSEPOLY
    vertices = [(0.2, -99), (0.4, -99), (0.4, 20), (0.2, 20), (0.2, -99)]
    path = Path(vertices, [move, line, line, line, close])
    # Mimic what plotting a logarithmic histogram does internally.
    trans = BlendedGenericTransform(Affine2D(),
                                    LogScale.Log10Transform('clip'))
    tpath = trans.transform_path_non_affine(path)
    segments = tpath.iter_segments(trans.get_affine(),
                                   clip=(0, 0, 100, 100),
                                   simplify=False)
    tpoints, tcodes = zip(*segments)
    # y == -99 lies outside the clip zone, so the first line segment is
    # effectively removed; the closepoly therefore has to be replaced by
    # a move to the first point.
    assert np.allclose(tcodes, [move, move, line, line, line])
    assert np.allclose(tpoints[-1], tpoints[0])
class NonAffineForTest(mtrans.Transform):
    """
    A class which looks like a non affine transform, but does whatever
    the given transform does (even if it is affine). This is very useful
    for testing NonAffine behaviour with a simple Affine transform.
    """
    is_affine = False
    output_dims = 2
    input_dims = 2

    def __init__(self, real_trans, *args, **kwargs):
        # The wrapped transform that actually does the work.
        self.real_trans = real_trans
        # Fix: the base-class __init__ return value was pointlessly bound
        # to an unused local (`r = ...`); call it for its side effects only.
        mtrans.Transform.__init__(self, *args, **kwargs)

    def transform_non_affine(self, values):
        # Delegate to the wrapped transform, presenting its (possibly
        # affine) behaviour as the "non-affine" part.
        return self.real_trans.transform(values)

    def transform_path_non_affine(self, path):
        return self.real_trans.transform_path(path)
class BasicTransformTests(unittest.TestCase):
    """Tests for composing, subtracting, iterating and simplifying
    transform stacks built from affine (ta*) and non-affine (tn*)
    transforms."""
    def setUp(self):
        # Three plain affine transforms...
        self.ta1 = mtrans.Affine2D(shorthand_name='ta1').rotate(np.pi / 2)
        self.ta2 = mtrans.Affine2D(shorthand_name='ta2').translate(10, 0)
        self.ta3 = mtrans.Affine2D(shorthand_name='ta3').scale(1, 2)
        # ...and three wrappers that report themselves as non-affine.
        self.tn1 = NonAffineForTest(mtrans.Affine2D().translate(1, 2), shorthand_name='tn1')
        self.tn2 = NonAffineForTest(mtrans.Affine2D().translate(1, 2), shorthand_name='tn2')
        self.tn3 = NonAffineForTest(mtrans.Affine2D().translate(1, 2), shorthand_name='tn3')
        # creates a transform stack which looks like ((A, (N, A)), A)
        self.stack1 = (self.ta1 + (self.tn1 + self.ta2)) + self.ta3
        # creates a transform stack which looks like (((A, N), A), A)
        self.stack2 = self.ta1 + self.tn1 + self.ta2 + self.ta3
        # creates a transform stack which is a subset of stack2
        self.stack2_subset = self.tn1 + self.ta2 + self.ta3
        # when in debug, the transform stacks can produce dot images:
        # self.stack1.write_graphviz(file('stack1.dot', 'w'))
        # self.stack2.write_graphviz(file('stack2.dot', 'w'))
        # self.stack2_subset.write_graphviz(file('stack2_subset.dot', 'w'))
    def test_transform_depth(self):
        """depth counts the leaf transforms in a stack (4, 4 and 3 here),
        independent of how the additions were parenthesised."""
        assert_equal(self.stack1.depth, 4)
        assert_equal(self.stack2.depth, 4)
        assert_equal(self.stack2_subset.depth, 3)
    def test_left_to_right_iteration(self):
        stack3 = (self.ta1 + (self.tn1 + (self.ta2 + self.tn2))) + self.ta3
        # stack3.write_graphviz(file('stack3.dot', 'w'))
        # Peeling transforms off the left one at a time should yield these
        # right-hand remainders, ending with the rightmost transform.
        target_transforms = [stack3,
                             (self.tn1 + (self.ta2 + self.tn2)) + self.ta3,
                             (self.ta2 + self.tn2) + self.ta3,
                             self.tn2 + self.ta3,
                             self.ta3,
                             ]
        r = [rh for _, rh in stack3._iter_break_from_left_to_right()]
        self.assertEqual(len(r), len(target_transforms))
        for target_stack, stack in zip(target_transforms, r):
            self.assertEqual(target_stack, stack)
    def test_transform_shortcuts(self):
        # Subtracting a contained sub-stack leaves the remaining prefix.
        self.assertEqual(self.stack1 - self.stack2_subset, self.ta1)
        self.assertEqual(self.stack2 - self.stack2_subset, self.ta1)
        # Subtracting a superset yields the inverse of the extra prefix.
        assert_equal((self.stack2_subset - self.stack2),
                     self.ta1.inverted(),
                      )
        assert_equal((self.stack2_subset - self.stack2).depth, 1)
        # Subtraction of two unrelated stacks is rejected.
        assert_raises(ValueError, self.stack1.__sub__, self.stack2)
        aff1 = self.ta1 + (self.ta2 + self.ta3)
        aff2 = self.ta2 + self.ta3
        self.assertEqual(aff1 - aff2, self.ta1)
        self.assertEqual(aff1 - self.ta2, aff1 + self.ta2.inverted())
        self.assertEqual(self.stack1 - self.ta3, self.ta1 + (self.tn1 + self.ta2))
        self.assertEqual(self.stack2 - self.ta3, self.ta1 + self.tn1 + self.ta2)
        self.assertEqual((self.ta2 + self.ta3) - self.ta3 + self.ta3, self.ta2 + self.ta3)
    def test_contains_branch(self):
        r1 = (self.ta2 + self.ta1)
        r2 = (self.ta2 + self.ta1)
        # Equal stacks built from the same components compare equal.
        self.assertEqual(r1, r2)
        self.assertNotEqual(r1, self.ta1)
        # A stack contains itself and ta1, but not ta2 on its own --
        # only the right-hand branch counts here.
        self.assertTrue(r1.contains_branch(r2))
        self.assertTrue(r1.contains_branch(self.ta1))
        self.assertFalse(r1.contains_branch(self.ta2))
        self.assertFalse(r1.contains_branch((self.ta2 + self.ta2)))
        self.assertEqual(r1, r2)
        self.assertTrue(self.stack1.contains_branch(self.ta3))
        self.assertTrue(self.stack2.contains_branch(self.ta3))
        self.assertTrue(self.stack1.contains_branch(self.stack2_subset))
        self.assertTrue(self.stack2.contains_branch(self.stack2_subset))
        self.assertFalse(self.stack2_subset.contains_branch(self.stack1))
        self.assertFalse(self.stack2_subset.contains_branch(self.stack2))
        self.assertTrue(self.stack1.contains_branch((self.ta2 + self.ta3)))
        self.assertTrue(self.stack2.contains_branch((self.ta2 + self.ta3)))
        self.assertFalse(self.stack1.contains_branch((self.tn1 + self.ta2)))
    def test_affine_simplification(self):
        # tests that a transform stack only calls as much is absolutely necessary
        # "non-affine" allowing the best possible optimization with complex
        # transformation stacks.
        points = np.array([[0, 0], [10, 20], [np.nan, 1], [-1, 0]], dtype=np.float64)
        na_pts = self.stack1.transform_non_affine(points)
        all_pts = self.stack1.transform(points)
        na_expected = np.array([[1., 2.], [-19., 12.],
                                [np.nan, np.nan], [1., 1.]], dtype=np.float64)
        all_expected = np.array([[11., 4.], [-9., 24.],
                                 [np.nan, np.nan], [11., 2.]], dtype=np.float64)
        # check we have the expected results from doing the affine part only
        np_test.assert_array_almost_equal(na_pts, na_expected)
        # check we have the expected results from a full transformation
        np_test.assert_array_almost_equal(all_pts, all_expected)
        # check we have the expected results from doing the transformation in two steps
        np_test.assert_array_almost_equal(self.stack1.transform_affine(na_pts), all_expected)
        # check that getting the affine transformation first, then fully transforming using that
        # yields the same result as before.
        np_test.assert_array_almost_equal(self.stack1.get_affine().transform(na_pts), all_expected)
        # check that the affine part of stack1 & stack2 are equivalent (i.e. the optimization
        # is working)
        expected_result = (self.ta2 + self.ta3).get_matrix()
        result = self.stack1.get_affine().get_matrix()
        np_test.assert_array_equal(expected_result, result)
        result = self.stack2.get_affine().get_matrix()
        np_test.assert_array_equal(expected_result, result)
class TestTransformPlotInterface(unittest.TestCase):
    """Checks that ``ax.dataLim`` is updated correctly when artists are
    added with custom ``transform=`` arguments: axes, data, blended and
    (affine or non-affine) offset coordinate systems.

    Only change vs. the original: the misspelled local
    ``expeted_data_lim`` is renamed ``expected_data_lim`` throughout.
    """
    def tearDown(self):
        # Dispose of the figure each test created.
        plt.close()
    def test_line_extent_axes_coords(self):
        # a simple line in axes coordinates
        ax = plt.axes()
        ax.plot([0.1, 1.2, 0.8], [0.9, 0.5, 0.8], transform=ax.transAxes)
        np.testing.assert_array_equal(ax.dataLim.get_points(), np.array([[0, 0], [1, 1]]))
    def test_line_extent_data_coords(self):
        # a simple line in data coordinates
        ax = plt.axes()
        ax.plot([0.1, 1.2, 0.8], [0.9, 0.5, 0.8], transform=ax.transData)
        np.testing.assert_array_equal(ax.dataLim.get_points(),
                                      np.array([[0.1, 0.5], [1.2, 0.9]]))
    def test_line_extent_compound_coords1(self):
        # a simple line in data coordinates in the y component, and in
        # axes coordinates in the x
        ax = plt.axes()
        trans = mtrans.blended_transform_factory(ax.transAxes, ax.transData)
        ax.plot([0.1, 1.2, 0.8], [35, -5, 18], transform=trans)
        np.testing.assert_array_equal(ax.dataLim.get_points(),
                                      np.array([[0., -5.], [1., 35.]]))
        plt.close()
    def test_line_extent_predata_transform_coords(self):
        # a simple line in (offset + data) coordinates
        ax = plt.axes()
        trans = mtrans.Affine2D().scale(10) + ax.transData
        ax.plot([0.1, 1.2, 0.8], [35, -5, 18], transform=trans)
        np.testing.assert_array_equal(ax.dataLim.get_points(),
                                      np.array([[1., -50.], [12., 350.]]))
        plt.close()
    def test_line_extent_compound_coords2(self):
        # a simple line in (offset + data) coordinates in the y component,
        # and in axes coordinates in the x
        ax = plt.axes()
        trans = mtrans.blended_transform_factory(
            ax.transAxes, mtrans.Affine2D().scale(10) + ax.transData)
        ax.plot([0.1, 1.2, 0.8], [35, -5, 18], transform=trans)
        np.testing.assert_array_equal(ax.dataLim.get_points(),
                                      np.array([[0., -50.], [1., 350.]]))
        plt.close()
    def test_line_extents_affine(self):
        ax = plt.axes()
        offset = mtrans.Affine2D().translate(10, 10)
        plt.plot(range(10), transform=offset + ax.transData)
        # y = x over 0..9, shifted by the (10, 10) offset
        expected_data_lim = np.array([[0., 0.], [9., 9.]]) + 10
        np.testing.assert_array_almost_equal(ax.dataLim.get_points(),
                                             expected_data_lim)
    def test_line_extents_non_affine(self):
        ax = plt.axes()
        offset = mtrans.Affine2D().translate(10, 10)
        na_offset = NonAffineForTest(mtrans.Affine2D().translate(10, 10))
        plt.plot(range(10), transform=offset + na_offset + ax.transData)
        # the affine and "non-affine" (10, 10) offsets accumulate to +20
        expected_data_lim = np.array([[0., 0.], [9., 9.]]) + 20
        np.testing.assert_array_almost_equal(ax.dataLim.get_points(),
                                             expected_data_lim)
    def test_pathc_extents_non_affine(self):
        ax = plt.axes()
        offset = mtrans.Affine2D().translate(10, 10)
        na_offset = NonAffineForTest(mtrans.Affine2D().translate(10, 10))
        pth = mpath.Path(np.array([[0, 0], [0, 10], [10, 10], [10, 0]]))
        patch = mpatches.PathPatch(pth, transform=offset + na_offset + ax.transData)
        ax.add_patch(patch)
        expected_data_lim = np.array([[0., 0.], [10., 10.]]) + 20
        np.testing.assert_array_almost_equal(ax.dataLim.get_points(),
                                             expected_data_lim)
    def test_pathc_extents_affine(self):
        ax = plt.axes()
        offset = mtrans.Affine2D().translate(10, 10)
        pth = mpath.Path(np.array([[0, 0], [0, 10], [10, 10], [10, 0]]))
        patch = mpatches.PathPatch(pth, transform=offset + ax.transData)
        ax.add_patch(patch)
        expected_data_lim = np.array([[0., 0.], [10., 10.]]) + 10
        np.testing.assert_array_almost_equal(ax.dataLim.get_points(),
                                             expected_data_lim)
    def test_line_extents_for_non_affine_transData(self):
        ax = plt.axes(projection='polar')
        # add 10 to the radius of the data
        offset = mtrans.Affine2D().translate(0, 10)
        plt.plot(range(10), transform=offset + ax.transData)
        # the data lim of a polar plot is stored in coordinates
        # before a transData transformation, hence the data limits
        # are not what is being shown on the actual plot.
        expected_data_lim = np.array([[0., 0.], [9., 9.]]) + [0, 10]
        np.testing.assert_array_almost_equal(ax.dataLim.get_points(),
                                             expected_data_lim)
if __name__=='__main__':
    # Run this module's tests under nose when executed directly;
    # -s disables output capture, --with-doctest also collects doctests,
    # and exit=False returns instead of calling sys.exit().
    import nose
    nose.runmodule(argv=['-s','--with-doctest'], exit=False)
| [
"matplotlib.pyplot.axes",
"numpy.allclose",
"matplotlib.testing.decorators.image_comparison",
"numpy.sin",
"matplotlib.transforms.Transform.__init__",
"numpy.arange",
"matplotlib.scale.LogScale.Log10Transform",
"numpy.testing.assert_array_almost_equal",
"matplotlib.transforms.Affine2D.from_values",
... | [((2718, 2774), 'matplotlib.testing.decorators.image_comparison', 'image_comparison', ([], {'baseline_images': "['pre_transform_data']"}), "(baseline_images=['pre_transform_data'])\n", (2734, 2774), False, 'from matplotlib.testing.decorators import cleanup, image_comparison\n'), ((1793, 1803), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (1801, 1803), True, 'import matplotlib.pyplot as plt\n'), ((1867, 1877), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (1875, 1877), True, 'import matplotlib.pyplot as plt\n'), ((2062, 2072), 'matplotlib.pyplot.draw', 'plt.draw', ([], {}), '()\n', (2070, 2072), True, 'import matplotlib.pyplot as plt\n'), ((2381, 2391), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (2389, 2391), True, 'import matplotlib.pyplot as plt\n'), ((3021, 3031), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (3029, 3031), True, 'import matplotlib.pyplot as plt\n'), ((3474, 3496), 'numpy.linspace', 'np.linspace', (['(8)', '(10)', '(20)'], {}), '(8, 10, 20)\n', (3485, 3496), True, 'import numpy as np\n'), ((3505, 3526), 'numpy.linspace', 'np.linspace', (['(1)', '(5)', '(20)'], {}), '(1, 5, 20)\n', (3516, 3526), True, 'import numpy as np\n'), ((4076, 4113), 'numpy.array', 'np.array', (['[[0, 0], [10, 20], [-1, 0]]'], {}), '([[0, 0], [10, 20], [-1, 0]])\n', (4084, 4113), True, 'import numpy as np\n'), ((4168, 4213), 'matplotlib.transforms.Affine2D.from_values', 'mtrans.Affine2D.from_values', (['(1)', '(0)', '(0)', '(0)', '(0)', '(0)'], {}), '(1, 0, 0, 0, 0, 0)\n', (4195, 4213), True, 'import matplotlib.transforms as mtrans\n'), ((4257, 4293), 'numpy.array', 'np.array', (['[[0, 0], [10, 0], [-1, 0]]'], {}), '([[0, 0], [10, 0], [-1, 0]])\n', (4265, 4293), True, 'import numpy as np\n'), ((4295, 4332), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (4314, 4332), False, 'from numpy.testing import assert_almost_equal\n'), ((4341, 4386), 
'matplotlib.transforms.Affine2D.from_values', 'mtrans.Affine2D.from_values', (['(0)', '(2)', '(0)', '(0)', '(0)', '(0)'], {}), '(0, 2, 0, 0, 0, 0)\n', (4368, 4386), True, 'import matplotlib.transforms as mtrans\n'), ((4430, 4466), 'numpy.array', 'np.array', (['[[0, 0], [0, 20], [0, -2]]'], {}), '([[0, 0], [0, 20], [0, -2]])\n', (4438, 4466), True, 'import numpy as np\n'), ((4468, 4505), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (4487, 4505), False, 'from numpy.testing import assert_almost_equal\n'), ((4514, 4559), 'matplotlib.transforms.Affine2D.from_values', 'mtrans.Affine2D.from_values', (['(0)', '(0)', '(3)', '(0)', '(0)', '(0)'], {}), '(0, 0, 3, 0, 0, 0)\n', (4541, 4559), True, 'import matplotlib.transforms as mtrans\n'), ((4603, 4638), 'numpy.array', 'np.array', (['[[0, 0], [60, 0], [0, 0]]'], {}), '([[0, 0], [60, 0], [0, 0]])\n', (4611, 4638), True, 'import numpy as np\n'), ((4640, 4677), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (4659, 4677), False, 'from numpy.testing import assert_almost_equal\n'), ((4686, 4731), 'matplotlib.transforms.Affine2D.from_values', 'mtrans.Affine2D.from_values', (['(0)', '(0)', '(0)', '(4)', '(0)', '(0)'], {}), '(0, 0, 0, 4, 0, 0)\n', (4713, 4731), True, 'import matplotlib.transforms as mtrans\n'), ((4775, 4810), 'numpy.array', 'np.array', (['[[0, 0], [0, 80], [0, 0]]'], {}), '([[0, 0], [0, 80], [0, 0]])\n', (4783, 4810), True, 'import numpy as np\n'), ((4812, 4849), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (4831, 4849), False, 'from numpy.testing import assert_almost_equal\n'), ((4858, 4903), 'matplotlib.transforms.Affine2D.from_values', 'mtrans.Affine2D.from_values', (['(0)', '(0)', '(0)', '(0)', '(5)', '(0)'], {}), '(0, 0, 0, 0, 5, 0)\n', (4885, 4903), True, 'import matplotlib.transforms as mtrans\n'), ((4947, 
4981), 'numpy.array', 'np.array', (['[[5, 0], [5, 0], [5, 0]]'], {}), '([[5, 0], [5, 0], [5, 0]])\n', (4955, 4981), True, 'import numpy as np\n'), ((4983, 5020), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (5002, 5020), False, 'from numpy.testing import assert_almost_equal\n'), ((5029, 5074), 'matplotlib.transforms.Affine2D.from_values', 'mtrans.Affine2D.from_values', (['(0)', '(0)', '(0)', '(0)', '(0)', '(6)'], {}), '(0, 0, 0, 0, 0, 6)\n', (5056, 5074), True, 'import matplotlib.transforms as mtrans\n'), ((5118, 5152), 'numpy.array', 'np.array', (['[[0, 6], [0, 6], [0, 6]]'], {}), '([[0, 6], [0, 6], [0, 6]])\n', (5126, 5152), True, 'import numpy as np\n'), ((5154, 5191), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['actual', 'expected'], {}), '(actual, expected)\n', (5173, 5191), False, 'from numpy.testing import assert_almost_equal\n'), ((5449, 5468), 'matplotlib.path.Path', 'Path', (['points', 'codes'], {}), '(points, codes)\n', (5453, 5468), False, 'from matplotlib.path import Path\n'), ((6114, 6150), 'numpy.allclose', 'np.allclose', (['tcodes', '[M, M, L, L, L]'], {}), '(tcodes, [M, M, L, L, L])\n', (6125, 6150), True, 'import numpy as np\n'), ((6164, 6200), 'numpy.allclose', 'np.allclose', (['tpoints[-1]', 'tpoints[0]'], {}), '(tpoints[-1], tpoints[0])\n', (6175, 6200), True, 'import numpy as np\n'), ((17425, 17482), 'nose.runmodule', 'nose.runmodule', ([], {'argv': "['-s', '--with-doctest']", 'exit': '(False)'}), "(argv=['-s', '--with-doctest'], exit=False)\n", (17439, 17482), False, 'import nose\n'), ((3181, 3201), 'numpy.linspace', 'np.linspace', (['(0)', '(4)', '(7)'], {}), '(0, 4, 7)\n', (3192, 3201), True, 'import numpy as np\n'), ((3222, 3244), 'numpy.linspace', 'np.linspace', (['(5.5)', '(8)', '(9)'], {}), '(5.5, 8, 9)\n', (3233, 3244), True, 'import numpy as np\n'), ((3366, 3384), 'numpy.linspace', 'np.linspace', (['(0)', '(10)'], {}), '(0, 10)\n', (3377, 3384), 
True, 'import numpy as np\n'), ((3386, 3404), 'numpy.linspace', 'np.linspace', (['(10)', '(0)'], {}), '(10, 0)\n', (3397, 3404), True, 'import numpy as np\n'), ((3549, 3573), 'numpy.cos', 'np.cos', (['y[:, np.newaxis]'], {}), '(y[:, np.newaxis])\n', (3555, 3573), True, 'import numpy as np\n'), ((3582, 3591), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (3588, 3591), True, 'import numpy as np\n'), ((3594, 3618), 'numpy.cos', 'np.cos', (['y[:, np.newaxis]'], {}), '(y[:, np.newaxis])\n', (3600, 3618), True, 'import numpy as np\n'), ((5575, 5585), 'matplotlib.transforms.Affine2D', 'Affine2D', ([], {}), '()\n', (5583, 5585), False, 'from matplotlib.transforms import Affine2D, BlendedGenericTransform\n'), ((5623, 5654), 'matplotlib.scale.LogScale.Log10Transform', 'LogScale.Log10Transform', (['"""clip"""'], {}), "('clip')\n", (5646, 5654), False, 'from matplotlib.scale import LogScale\n'), ((6638, 6686), 'matplotlib.transforms.Transform.__init__', 'mtrans.Transform.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (6663, 6686), True, 'import matplotlib.transforms as mtrans\n'), ((8163, 8197), 'nose.tools.assert_equal', 'assert_equal', (['self.stack1.depth', '(4)'], {}), '(self.stack1.depth, 4)\n', (8175, 8197), False, 'from nose.tools import assert_equal, assert_raises\n'), ((8206, 8240), 'nose.tools.assert_equal', 'assert_equal', (['self.stack2.depth', '(4)'], {}), '(self.stack2.depth, 4)\n', (8218, 8240), False, 'from nose.tools import assert_equal, assert_raises\n'), ((8249, 8290), 'nose.tools.assert_equal', 'assert_equal', (['self.stack2_subset.depth', '(3)'], {}), '(self.stack2_subset.depth, 3)\n', (8261, 8290), False, 'from nose.tools import assert_equal, assert_raises\n'), ((9344, 9401), 'nose.tools.assert_equal', 'assert_equal', (['(self.stack2_subset - self.stack2).depth', '(1)'], {}), '((self.stack2_subset - self.stack2).depth, 1)\n', (9356, 9401), False, 'from nose.tools import assert_equal, assert_raises\n'), ((9411, 9470), 
'nose.tools.assert_raises', 'assert_raises', (['ValueError', 'self.stack1.__sub__', 'self.stack2'], {}), '(ValueError, self.stack1.__sub__, self.stack2)\n', (9424, 9470), False, 'from nose.tools import assert_equal, assert_raises\n'), ((11285, 11353), 'numpy.array', 'np.array', (['[[0, 0], [10, 20], [np.nan, 1], [-1, 0]]'], {'dtype': 'np.float64'}), '([[0, 0], [10, 20], [np.nan, 1], [-1, 0]], dtype=np.float64)\n', (11293, 11353), True, 'import numpy as np\n'), ((11483, 11573), 'numpy.array', 'np.array', (['[[1.0, 2.0], [-19.0, 12.0], [np.nan, np.nan], [1.0, 1.0]]'], {'dtype': 'np.float64'}), '([[1.0, 2.0], [-19.0, 12.0], [np.nan, np.nan], [1.0, 1.0]], dtype=\n np.float64)\n', (11491, 11573), True, 'import numpy as np\n'), ((11618, 11709), 'numpy.array', 'np.array', (['[[11.0, 4.0], [-9.0, 24.0], [np.nan, np.nan], [11.0, 2.0]]'], {'dtype': 'np.float64'}), '([[11.0, 4.0], [-9.0, 24.0], [np.nan, np.nan], [11.0, 2.0]], dtype=\n np.float64)\n', (11626, 11709), True, 'import numpy as np\n'), ((11818, 11872), 'numpy.testing.assert_array_almost_equal', 'np_test.assert_array_almost_equal', (['na_pts', 'na_expected'], {}), '(na_pts, na_expected)\n', (11851, 11872), True, 'import numpy.testing as np_test\n'), ((11953, 12009), 'numpy.testing.assert_array_almost_equal', 'np_test.assert_array_almost_equal', (['all_pts', 'all_expected'], {}), '(all_pts, all_expected)\n', (11986, 12009), True, 'import numpy.testing as np_test\n'), ((12674, 12725), 'numpy.testing.assert_array_equal', 'np_test.assert_array_equal', (['expected_result', 'result'], {}), '(expected_result, result)\n', (12700, 12725), True, 'import numpy.testing as np_test\n'), ((12790, 12841), 'numpy.testing.assert_array_equal', 'np_test.assert_array_equal', (['expected_result', 'result'], {}), '(expected_result, result)\n', (12816, 12841), True, 'import numpy.testing as np_test\n'), ((12929, 12940), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12938, 12940), True, 'import matplotlib.pyplot as plt\n'), 
((13051, 13061), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (13059, 13061), True, 'import matplotlib.pyplot as plt\n'), ((13329, 13339), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (13337, 13339), True, 'import matplotlib.pyplot as plt\n'), ((13677, 13687), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (13685, 13687), True, 'import matplotlib.pyplot as plt\n'), ((13704, 13764), 'matplotlib.transforms.blended_transform_factory', 'mtrans.blended_transform_factory', (['ax.transAxes', 'ax.transData'], {}), '(ax.transAxes, ax.transData)\n', (13736, 13764), True, 'import matplotlib.transforms as mtrans\n'), ((13940, 13951), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13949, 13951), True, 'import matplotlib.pyplot as plt\n'), ((14078, 14088), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (14086, 14088), True, 'import matplotlib.pyplot as plt\n'), ((14320, 14331), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14329, 14331), True, 'import matplotlib.pyplot as plt\n'), ((14503, 14513), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (14511, 14513), True, 'import matplotlib.pyplot as plt\n'), ((14798, 14809), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (14807, 14809), True, 'import matplotlib.pyplot as plt\n'), ((14864, 14874), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (14872, 14874), True, 'import matplotlib.pyplot as plt\n'), ((15244, 15254), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (15252, 15254), True, 'import matplotlib.pyplot as plt\n'), ((15711, 15721), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (15719, 15721), True, 'import matplotlib.pyplot as plt\n'), ((15938, 16006), 'matplotlib.patches.PathPatch', 'mpatches.PathPatch', (['pth'], {'transform': '(offset + na_offset + ax.transData)'}), '(pth, transform=offset + na_offset + ax.transData)\n', (15956, 16006), True, 'import matplotlib.patches as mpatches\n'), ((16289, 16299), 
'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (16297, 16299), True, 'import matplotlib.pyplot as plt\n'), ((16442, 16498), 'matplotlib.patches.PathPatch', 'mpatches.PathPatch', (['pth'], {'transform': '(offset + ax.transData)'}), '(pth, transform=offset + ax.transData)\n', (16460, 16498), True, 'import matplotlib.patches as mpatches\n'), ((16800, 16828), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""polar"""'}), "(projection='polar')\n", (16808, 16828), True, 'import matplotlib.pyplot as plt\n'), ((925, 973), 'matplotlib.transforms.Transform.__init__', 'mtrans.Transform.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (950, 973), True, 'import matplotlib.transforms as mtrans\n'), ((3046, 3063), 'matplotlib.transforms.Affine2D', 'mtrans.Affine2D', ([], {}), '()\n', (3061, 3063), True, 'import matplotlib.transforms as mtrans\n'), ((3537, 3546), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (3543, 3546), True, 'import numpy as np\n'), ((13199, 13225), 'numpy.array', 'np.array', (['[[0, 0], [1, 1]]'], {}), '([[0, 0], [1, 1]])\n', (13207, 13225), True, 'import numpy as np\n'), ((13477, 13511), 'numpy.array', 'np.array', (['[[0.1, 0.5], [1.2, 0.9]]'], {}), '([[0.1, 0.5], [1.2, 0.9]])\n', (13485, 13511), True, 'import numpy as np\n'), ((13892, 13928), 'numpy.array', 'np.array', (['[[0.0, -5.0], [1.0, 35.0]]'], {}), '([[0.0, -5.0], [1.0, 35.0]])\n', (13900, 13928), True, 'import numpy as np\n'), ((14275, 14314), 'numpy.array', 'np.array', (['[[1.0, -50.0], [12.0, 350.0]]'], {}), '([[1.0, -50.0], [12.0, 350.0]])\n', (14283, 14314), True, 'import numpy as np\n'), ((14748, 14786), 'numpy.array', 'np.array', (['[[0.0, -50.0], [1.0, 350.0]]'], {}), '([[0.0, -50.0], [1.0, 350.0]])\n', (14756, 14786), True, 'import numpy as np\n'), ((15016, 15050), 'numpy.array', 'np.array', (['[[0.0, 0.0], [9.0, 9.0]]'], {}), '([[0.0, 0.0], [9.0, 9.0]])\n', (15024, 15050), True, 'import numpy as np\n'), ((15482, 15516), 'numpy.array', 'np.array', 
(['[[0.0, 0.0], [9.0, 9.0]]'], {}), '([[0.0, 0.0], [9.0, 9.0]])\n', (15490, 15516), True, 'import numpy as np\n'), ((15874, 15920), 'numpy.array', 'np.array', (['[[0, 0], [0, 10], [10, 10], [10, 0]]'], {}), '([[0, 0], [0, 10], [10, 10], [10, 0]])\n', (15882, 15920), True, 'import numpy as np\n'), ((16062, 16098), 'numpy.array', 'np.array', (['[[0.0, 0.0], [10.0, 10.0]]'], {}), '([[0.0, 0.0], [10.0, 10.0]])\n', (16070, 16098), True, 'import numpy as np\n'), ((16378, 16424), 'numpy.array', 'np.array', (['[[0, 0], [0, 10], [10, 10], [10, 0]]'], {}), '([[0, 0], [0, 10], [10, 10], [10, 0]])\n', (16386, 16424), True, 'import numpy as np\n'), ((16554, 16590), 'numpy.array', 'np.array', (['[[0.0, 0.0], [10.0, 10.0]]'], {}), '([[0.0, 0.0], [10.0, 10.0]])\n', (16562, 16590), True, 'import numpy as np\n'), ((17202, 17236), 'numpy.array', 'np.array', (['[[0.0, 0.0], [9.0, 9.0]]'], {}), '([[0.0, 0.0], [9.0, 9.0]])\n', (17210, 17236), True, 'import numpy as np\n'), ((3095, 3108), 'numpy.arange', 'np.arange', (['(48)'], {}), '(48)\n', (3104, 3108), True, 'import numpy as np\n'), ((3265, 3278), 'numpy.arange', 'np.arange', (['(48)'], {}), '(48)\n', (3274, 3278), True, 'import numpy as np\n'), ((6970, 7007), 'matplotlib.transforms.Affine2D', 'mtrans.Affine2D', ([], {'shorthand_name': '"""ta1"""'}), "(shorthand_name='ta1')\n", (6985, 7007), True, 'import matplotlib.transforms as mtrans\n'), ((7045, 7082), 'matplotlib.transforms.Affine2D', 'mtrans.Affine2D', ([], {'shorthand_name': '"""ta2"""'}), "(shorthand_name='ta2')\n", (7060, 7082), True, 'import matplotlib.transforms as mtrans\n'), ((7119, 7156), 'matplotlib.transforms.Affine2D', 'mtrans.Affine2D', ([], {'shorthand_name': '"""ta3"""'}), "(shorthand_name='ta3')\n", (7134, 7156), True, 'import matplotlib.transforms as mtrans\n'), ((14892, 14909), 'matplotlib.transforms.Affine2D', 'mtrans.Affine2D', ([], {}), '()\n', (14907, 14909), True, 'import matplotlib.transforms as mtrans\n'), ((15272, 15289), 
'matplotlib.transforms.Affine2D', 'mtrans.Affine2D', ([], {}), '()\n', (15287, 15289), True, 'import matplotlib.transforms as mtrans\n'), ((15739, 15756), 'matplotlib.transforms.Affine2D', 'mtrans.Affine2D', ([], {}), '()\n', (15754, 15756), True, 'import matplotlib.transforms as mtrans\n'), ((16317, 16334), 'matplotlib.transforms.Affine2D', 'mtrans.Affine2D', ([], {}), '()\n', (16332, 16334), True, 'import matplotlib.transforms as mtrans\n'), ((16889, 16906), 'matplotlib.transforms.Affine2D', 'mtrans.Affine2D', ([], {}), '()\n', (16904, 16906), True, 'import matplotlib.transforms as mtrans\n'), ((1058, 1075), 'matplotlib.transforms.Affine2D', 'mtrans.Affine2D', ([], {}), '()\n', (1073, 1075), True, 'import matplotlib.transforms as mtrans\n'), ((7206, 7223), 'matplotlib.transforms.Affine2D', 'mtrans.Affine2D', ([], {}), '()\n', (7221, 7223), True, 'import matplotlib.transforms as mtrans\n'), ((7299, 7316), 'matplotlib.transforms.Affine2D', 'mtrans.Affine2D', ([], {}), '()\n', (7314, 7316), True, 'import matplotlib.transforms as mtrans\n'), ((7392, 7409), 'matplotlib.transforms.Affine2D', 'mtrans.Affine2D', ([], {}), '()\n', (7407, 7409), True, 'import matplotlib.transforms as mtrans\n'), ((14105, 14122), 'matplotlib.transforms.Affine2D', 'mtrans.Affine2D', ([], {}), '()\n', (14120, 14122), True, 'import matplotlib.transforms as mtrans\n'), ((15345, 15362), 'matplotlib.transforms.Affine2D', 'mtrans.Affine2D', ([], {}), '()\n', (15360, 15362), True, 'import matplotlib.transforms as mtrans\n'), ((15812, 15829), 'matplotlib.transforms.Affine2D', 'mtrans.Affine2D', ([], {}), '()\n', (15827, 15829), True, 'import matplotlib.transforms as mtrans\n'), ((2310, 2327), 'matplotlib.transforms.Affine2D', 'mtrans.Affine2D', ([], {}), '()\n', (2325, 2327), True, 'import matplotlib.transforms as mtrans\n'), ((2669, 2686), 'matplotlib.transforms.Affine2D', 'mtrans.Affine2D', ([], {}), '()\n', (2684, 2686), True, 'import matplotlib.transforms as mtrans\n'), ((14577, 14594), 
'matplotlib.transforms.Affine2D', 'mtrans.Affine2D', ([], {}), '()\n', (14592, 14594), True, 'import matplotlib.transforms as mtrans\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from inference_pass_test import InferencePassTest
from quant_dequant_test import QuantDequantTest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import PassVersionChecker
from paddle.fluid.core import AnalysisConfig
class TensorRTMatMulQuantDequantDims3Test(QuantDequantTest):
    # Trains a small fc(relu(matmul(x, x))) network on 3-D inputs, quantizes it,
    # and checks the TensorRT int8 engine output against the fluid baseline.
    def setUp(self):
        # transpose_x / transpose_y / alpha are set here so subclasses can override
        self.set_params()
        def network():
            # Builds the network; returns (avg_loss, logits).  self.data/self.label
            # are stored as attributes so the test harness can feed them.
            self.data = fluid.data(
                name='data', shape=[1, 28, 28], dtype='float32')
            self.label = fluid.data(name='label', shape=[1, 1], dtype='int64')
            # matmul of the input with itself exercises the op under test
            matmul_out = fluid.layers.matmul(
                x=self.data,
                y=self.data,
                transpose_x=self.transpose_x,
                transpose_y=self.transpose_y,
                alpha=self.alpha)
            fc_out = fluid.layers.fc(input=matmul_out,
                                     size=10,
                                     num_flatten_dims=1,
                                     bias_attr=False,
                                     act=None)
            result = fluid.layers.relu(fc_out)
            loss = fluid.layers.cross_entropy(input=result, label=self.label)
            avg_loss = fluid.layers.mean(loss)
            return avg_loss, result
        # fixed seeds so train/test programs build identical graphs
        self.main_program.random_seed = 2
        self.startup_program.random_seed = 2
        self.test_main_program.random_seed = 2
        #self.test_startup_program.random_seed = 2
        # training program (with optimizer)
        with fluid.unique_name.guard():
            with fluid.program_guard(self.main_program, self.startup_program):
                self.loss, result = network()
                opt = fluid.optimizer.Adam(learning_rate=0.0001)
                opt.minimize(self.loss)
        # inference program (same network, no optimizer)
        with fluid.unique_name.guard():
            with fluid.program_guard(self.test_main_program,
                                     self.startup_program):
                network()
        self.feeds = {"data": np.random.random([1, 28, 28]).astype("float32")}
        self.fetch_list = [result]
        self.enable_trt = True
        # workspace=1GB, max_batch=32, min_subgraph_size=0, int8 precision
        self.trt_parameters = TensorRTMatMulQuantDequantDims3Test.TensorRTParam(
            1 << 30, 32, 0, AnalysisConfig.Precision.Int8, False, False)
        self.activation_quantize_type = 'moving_average_abs_max'
        self.weight_quantize_type = 'channel_wise_abs_max'
    def set_params(self):
        # base case: plain matmul, no transpose, unit alpha
        self.transpose_x = False
        self.transpose_y = False
        self.alpha = 1.0
    def test_check_output(self):
        #self.quant_dequant()
        # TensorRT is only available on CUDA builds; loose tolerances because int8
        if core.is_compiled_with_cuda():
            use_gpu = True
            self.check_output_with_option(
                use_gpu, atol=1e-1, flatten=False, rtol=1e-1)
            self.assertTrue(
                PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
class TensorRTMatMulQuantDequantDims3TransposeXTest(
        TensorRTMatMulQuantDequantDims3Test):
    """Same test as the base class but with the first matmul input transposed."""
    def set_params(self):
        self.transpose_x = True
        self.transpose_y = False
        self.alpha = 1.0
class TensorRTMatMulQuantDequantDims3TransposeYTest(
        TensorRTMatMulQuantDequantDims3Test):
    """Same test as the base class but with the second matmul input transposed."""
    def set_params(self):
        self.transpose_x = False
        self.transpose_y = True
        self.alpha = 1.0
class TensorRTMatMulQuantDequantDims3TransposeXYTest(
        TensorRTMatMulQuantDequantDims3Test):
    """Same test as the base class but with both matmul inputs transposed."""
    def set_params(self):
        self.transpose_x = True
        self.transpose_y = True
        self.alpha = 1.0
class TensorRTMatMulQuantDequantDims4Test(QuantDequantTest):
    # Same quant/dequant + TensorRT int8 matmul test as the Dims3 variant, but
    # the input is reshaped to 4-D ([1, 4, 14, 14]) before the matmul.
    def setUp(self):
        # transpose_x / transpose_y / alpha are set here so subclasses can override
        self.set_params()
        def network():
            # Builds the network; returns (avg_loss, logits).
            self.data = fluid.data(
                name='data', shape=[1, 28, 28], dtype='float32')
            self.label = fluid.data(name='label', shape=[1, 1], dtype='int64')
            # reshape to 4-D so matmul runs on the last two dims of a 4-D tensor
            reshape_out = fluid.layers.reshape(self.data, shape=[1, 4, 14, 14])
            matmul_out = fluid.layers.matmul(
                x=reshape_out,
                y=reshape_out,
                transpose_x=self.transpose_x,
                transpose_y=self.transpose_y,
                alpha=self.alpha)
            # NOTE(review): 'out' is unused; fc consumes matmul_out directly.
            # Presumably the batch_norm is only there to add its ops to the
            # program being quantized -- confirm before removing.
            out = fluid.layers.batch_norm(matmul_out, is_test=True)
            fc_out = fluid.layers.fc(input=matmul_out,
                                     size=10,
                                     num_flatten_dims=1,
                                     bias_attr=False,
                                     act=None)
            result = fluid.layers.relu(fc_out)
            loss = fluid.layers.cross_entropy(input=result, label=self.label)
            avg_loss = fluid.layers.mean(loss)
            return avg_loss, result
        # fixed seeds so train/test programs build identical graphs
        self.main_program.random_seed = 2
        self.startup_program.random_seed = 2
        self.test_main_program.random_seed = 2
        #self.test_startup_program.random_seed = 2
        # training program (with optimizer)
        with fluid.unique_name.guard():
            with fluid.program_guard(self.main_program, self.startup_program):
                self.loss, result = network()
                opt = fluid.optimizer.Adam(learning_rate=0.0001)
                opt.minimize(self.loss)
        # inference program (same network, no optimizer)
        with fluid.unique_name.guard():
            with fluid.program_guard(self.test_main_program,
                                     self.startup_program):
                network()
        self.feeds = {"data": np.random.random([1, 28, 28]).astype("float32")}
        self.fetch_list = [result]
        self.enable_trt = True
        # workspace=1GB, max_batch=32, min_subgraph_size=0, int8 precision
        self.trt_parameters = TensorRTMatMulQuantDequantDims4Test.TensorRTParam(
            1 << 30, 32, 0, AnalysisConfig.Precision.Int8, False, False)
        self.activation_quantize_type = 'moving_average_abs_max'
        self.weight_quantize_type = 'channel_wise_abs_max'
    def set_params(self):
        # base case: plain matmul, no transpose, unit alpha
        self.transpose_x = False
        self.transpose_y = False
        self.alpha = 1.0
    def test_check_output(self):
        #self.quant_dequant()
        # TensorRT is only available on CUDA builds; loose tolerances because int8
        if core.is_compiled_with_cuda():
            use_gpu = True
            self.check_output_with_option(
                use_gpu, atol=1e-1, flatten=False, rtol=1e-1)
            self.assertTrue(
                PassVersionChecker.IsCompatible('tensorrt_subgraph_pass'))
class TensorRTMatMulQuantDequantDims4TransposeXTest(
        TensorRTMatMulQuantDequantDims4Test):
    """Same test as the 4-D base class but with the first matmul input transposed."""
    def set_params(self):
        self.transpose_x = True
        self.transpose_y = False
        self.alpha = 1.0
class TensorRTMatMulQuantDequantDims4TransposeYTest(
        TensorRTMatMulQuantDequantDims4Test):
    """Same test as the 4-D base class but with the second matmul input transposed."""
    def set_params(self):
        self.transpose_x = False
        self.transpose_y = True
        self.alpha = 1.0
class TensorRTMatMulQuantDequantDims4TransposeXYTest(
        TensorRTMatMulQuantDequantDims4Test):
    """Same test as the 4-D base class but with both matmul inputs transposed."""
    def set_params(self):
        self.transpose_x = True
        self.transpose_y = True
        self.alpha = 1.0
class TensorRTMatMulQuantDequantDims4ScaleTest(
        TensorRTMatMulQuantDequantDims4Test):
    """Same test as the 4-D base class but with a non-unit matmul alpha (scale)."""
    def set_params(self):
        self.transpose_x = False
        self.transpose_y = False
        self.alpha = 2.0
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"paddle.fluid.layers.matmul",
"paddle.fluid.data",
"paddle.fluid.layers.reshape",
"paddle.fluid.unique_name.guard",
"paddle.fluid.layers.relu",
"paddle.fluid.program_guard",
"paddle.fluid.optimizer.Adam",
"paddle.fluid.layers.batch_norm",
"numpy.random.random",
"paddle.fluid.lay... | [((7672, 7687), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7685, 7687), False, 'import unittest\n'), ((3170, 3198), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (3196, 3198), True, 'import paddle.fluid.core as core\n'), ((6509, 6537), 'paddle.fluid.core.is_compiled_with_cuda', 'core.is_compiled_with_cuda', ([], {}), '()\n', (6535, 6537), True, 'import paddle.fluid.core as core\n'), ((1058, 1117), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""data"""', 'shape': '[1, 28, 28]', 'dtype': '"""float32"""'}), "(name='data', shape=[1, 28, 28], dtype='float32')\n", (1068, 1117), True, 'import paddle.fluid as fluid\n'), ((1160, 1213), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""label"""', 'shape': '[1, 1]', 'dtype': '"""int64"""'}), "(name='label', shape=[1, 1], dtype='int64')\n", (1170, 1213), True, 'import paddle.fluid as fluid\n'), ((1239, 1366), 'paddle.fluid.layers.matmul', 'fluid.layers.matmul', ([], {'x': 'self.data', 'y': 'self.data', 'transpose_x': 'self.transpose_x', 'transpose_y': 'self.transpose_y', 'alpha': 'self.alpha'}), '(x=self.data, y=self.data, transpose_x=self.transpose_x,\n transpose_y=self.transpose_y, alpha=self.alpha)\n', (1258, 1366), True, 'import paddle.fluid as fluid\n'), ((1465, 1559), 'paddle.fluid.layers.fc', 'fluid.layers.fc', ([], {'input': 'matmul_out', 'size': '(10)', 'num_flatten_dims': '(1)', 'bias_attr': '(False)', 'act': 'None'}), '(input=matmul_out, size=10, num_flatten_dims=1, bias_attr=\n False, act=None)\n', (1480, 1559), True, 'import paddle.fluid as fluid\n'), ((1724, 1749), 'paddle.fluid.layers.relu', 'fluid.layers.relu', (['fc_out'], {}), '(fc_out)\n', (1741, 1749), True, 'import paddle.fluid as fluid\n'), ((1769, 1827), 'paddle.fluid.layers.cross_entropy', 'fluid.layers.cross_entropy', ([], {'input': 'result', 'label': 'self.label'}), '(input=result, label=self.label)\n', (1795, 1827), True, 'import paddle.fluid as fluid\n'), ((1851, 
1874), 'paddle.fluid.layers.mean', 'fluid.layers.mean', (['loss'], {}), '(loss)\n', (1868, 1874), True, 'import paddle.fluid as fluid\n'), ((2110, 2135), 'paddle.fluid.unique_name.guard', 'fluid.unique_name.guard', ([], {}), '()\n', (2133, 2135), True, 'import paddle.fluid as fluid\n'), ((2380, 2405), 'paddle.fluid.unique_name.guard', 'fluid.unique_name.guard', ([], {}), '()\n', (2403, 2405), True, 'import paddle.fluid as fluid\n'), ((4245, 4304), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""data"""', 'shape': '[1, 28, 28]', 'dtype': '"""float32"""'}), "(name='data', shape=[1, 28, 28], dtype='float32')\n", (4255, 4304), True, 'import paddle.fluid as fluid\n'), ((4347, 4400), 'paddle.fluid.data', 'fluid.data', ([], {'name': '"""label"""', 'shape': '[1, 1]', 'dtype': '"""int64"""'}), "(name='label', shape=[1, 1], dtype='int64')\n", (4357, 4400), True, 'import paddle.fluid as fluid\n'), ((4427, 4480), 'paddle.fluid.layers.reshape', 'fluid.layers.reshape', (['self.data'], {'shape': '[1, 4, 14, 14]'}), '(self.data, shape=[1, 4, 14, 14])\n', (4447, 4480), True, 'import paddle.fluid as fluid\n'), ((4506, 4638), 'paddle.fluid.layers.matmul', 'fluid.layers.matmul', ([], {'x': 'reshape_out', 'y': 'reshape_out', 'transpose_x': 'self.transpose_x', 'transpose_y': 'self.transpose_y', 'alpha': 'self.alpha'}), '(x=reshape_out, y=reshape_out, transpose_x=self.\n transpose_x, transpose_y=self.transpose_y, alpha=self.alpha)\n', (4525, 4638), True, 'import paddle.fluid as fluid\n'), ((4733, 4782), 'paddle.fluid.layers.batch_norm', 'fluid.layers.batch_norm', (['matmul_out'], {'is_test': '(True)'}), '(matmul_out, is_test=True)\n', (4756, 4782), True, 'import paddle.fluid as fluid\n'), ((4804, 4898), 'paddle.fluid.layers.fc', 'fluid.layers.fc', ([], {'input': 'matmul_out', 'size': '(10)', 'num_flatten_dims': '(1)', 'bias_attr': '(False)', 'act': 'None'}), '(input=matmul_out, size=10, num_flatten_dims=1, bias_attr=\n False, act=None)\n', (4819, 4898), True, 'import paddle.fluid as 
fluid\n'), ((5063, 5088), 'paddle.fluid.layers.relu', 'fluid.layers.relu', (['fc_out'], {}), '(fc_out)\n', (5080, 5088), True, 'import paddle.fluid as fluid\n'), ((5108, 5166), 'paddle.fluid.layers.cross_entropy', 'fluid.layers.cross_entropy', ([], {'input': 'result', 'label': 'self.label'}), '(input=result, label=self.label)\n', (5134, 5166), True, 'import paddle.fluid as fluid\n'), ((5190, 5213), 'paddle.fluid.layers.mean', 'fluid.layers.mean', (['loss'], {}), '(loss)\n', (5207, 5213), True, 'import paddle.fluid as fluid\n'), ((5449, 5474), 'paddle.fluid.unique_name.guard', 'fluid.unique_name.guard', ([], {}), '()\n', (5472, 5474), True, 'import paddle.fluid as fluid\n'), ((5719, 5744), 'paddle.fluid.unique_name.guard', 'fluid.unique_name.guard', ([], {}), '()\n', (5742, 5744), True, 'import paddle.fluid as fluid\n'), ((2154, 2214), 'paddle.fluid.program_guard', 'fluid.program_guard', (['self.main_program', 'self.startup_program'], {}), '(self.main_program, self.startup_program)\n', (2173, 2214), True, 'import paddle.fluid as fluid\n'), ((2284, 2326), 'paddle.fluid.optimizer.Adam', 'fluid.optimizer.Adam', ([], {'learning_rate': '(0.0001)'}), '(learning_rate=0.0001)\n', (2304, 2326), True, 'import paddle.fluid as fluid\n'), ((2424, 2489), 'paddle.fluid.program_guard', 'fluid.program_guard', (['self.test_main_program', 'self.startup_program'], {}), '(self.test_main_program, self.startup_program)\n', (2443, 2489), True, 'import paddle.fluid as fluid\n'), ((3377, 3434), 'paddle.fluid.core.PassVersionChecker.IsCompatible', 'PassVersionChecker.IsCompatible', (['"""tensorrt_subgraph_pass"""'], {}), "('tensorrt_subgraph_pass')\n", (3408, 3434), False, 'from paddle.fluid.core import PassVersionChecker\n'), ((5493, 5553), 'paddle.fluid.program_guard', 'fluid.program_guard', (['self.main_program', 'self.startup_program'], {}), '(self.main_program, self.startup_program)\n', (5512, 5553), True, 'import paddle.fluid as fluid\n'), ((5623, 5665), 'paddle.fluid.optimizer.Adam', 
'fluid.optimizer.Adam', ([], {'learning_rate': '(0.0001)'}), '(learning_rate=0.0001)\n', (5643, 5665), True, 'import paddle.fluid as fluid\n'), ((5763, 5828), 'paddle.fluid.program_guard', 'fluid.program_guard', (['self.test_main_program', 'self.startup_program'], {}), '(self.test_main_program, self.startup_program)\n', (5782, 5828), True, 'import paddle.fluid as fluid\n'), ((6716, 6773), 'paddle.fluid.core.PassVersionChecker.IsCompatible', 'PassVersionChecker.IsCompatible', (['"""tensorrt_subgraph_pass"""'], {}), "('tensorrt_subgraph_pass')\n", (6747, 6773), False, 'from paddle.fluid.core import PassVersionChecker\n'), ((2584, 2613), 'numpy.random.random', 'np.random.random', (['[1, 28, 28]'], {}), '([1, 28, 28])\n', (2600, 2613), True, 'import numpy as np\n'), ((5923, 5952), 'numpy.random.random', 'np.random.random', (['[1, 28, 28]'], {}), '([1, 28, 28])\n', (5939, 5952), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import rospy
import numpy as np
import time
from geometry_msgs.msg import Twist
from sensor_msgs.msg import LaserScan
class LaserFollowGapNode:
    """ROS node that steers the robot toward the direction of the longest laser beam.

    Subscribes to ``/laser/scan`` (sensor_msgs/LaserScan), publishes ``cmd_vel``
    (geometry_msgs/Twist) from a periodic timer.
    """

    def __init__(self):
        """Initialise the node, its publisher/subscriber and the control timer."""
        # Register ROS node
        rospy.init_node('laser_follow_gap_node')
        # Filled in by the first laser callback; _control_loop guards against None
        self._longest_beam_angle = None
        self._longest_beam_distance = None
        # Controller gains
        self._angular_gain = 0.5
        self._linear_gain = 0.1
        # If the angle error is bigger than this, only rotate in place
        self._heading_angle = np.pi / 4  # +-45 degree
        # Define subscriber and publisher
        self._cmd_vel_publisher = rospy.Publisher('cmd_vel', Twist, queue_size=10)
        self._laser_subscriber = rospy.Subscriber('/laser/scan', LaserScan, self._laser_callback)
        # Message object reused on every control cycle
        self._vel_msg = Twist()
        # Block until the laser topic starts being published.
        # NOTE: wait_for_message() blocks until a message arrives and a ROS message
        # object is always truthy, so the old retry loop around it never executed.
        rospy.wait_for_message('/laser/scan', LaserScan)
        # Run control loop at 20 Hz (period 0.05 s) calling _control_loop
        self._lights_controller_timer = rospy.Timer(rospy.Duration(0.05), self._control_loop)
        # Notify user that node started
        rospy.loginfo(f'{rospy.get_name()} started')

    def _control_loop(self, *args):
        """Execute one iteration of the main control loop (timer callback)."""
        # The timer may fire before the first scan has been processed
        if self._longest_beam_angle is None:
            return
        # Wrap the desired heading into [-pi, pi] to get the smallest signed error
        smallest_angle = np.arctan2(np.sin(self._longest_beam_angle), np.cos(self._longest_beam_angle))
        self._vel_msg.angular.z = smallest_angle * self._angular_gain
        # If the angle error is greater than the heading threshold, rotate in place
        if np.abs(smallest_angle) > self._heading_angle:
            self._vel_msg.linear.x = 0.0
        else:
            # Scale velocity command with respect to the distance of the gap
            self._vel_msg.linear.x = self._longest_beam_distance * self._linear_gain
        # Publish velocity
        self._cmd_vel_publisher.publish(self._vel_msg)

    def _laser_callback(self, scan):
        """Laser topic callback: store direction/distance of the longest beam."""
        ranges = np.array(scan.ranges)
        # BUG FIX: invalid/infinite beams must be compared against range_max
        # (meters), not angle_max (radians).  Out-of-range beams are clamped to
        # range_min so they are never selected as the longest beam.
        ranges[ranges >= scan.range_max] = scan.range_min
        # Find the reachable maximum
        idx = np.argmax(ranges, axis=0)
        beam_count = len(ranges)
        self._longest_beam_distance = ranges[idx]
        # Clip longest beam to at most 25 meters
        self._longest_beam_distance = np.clip(self._longest_beam_distance, 0, 25)
        # Linearly map the beam index to its angle within [angle_min, angle_max]
        self._longest_beam_angle = (idx / beam_count) * np.abs(scan.angle_max - scan.angle_min) + scan.angle_min
def main():
    """Entry point: construct the gap-follower node and block until shutdown."""
    try:
        node = LaserFollowGapNode()
        rospy.spin()
    except Exception as e:
        rospy.logerr(f'laser_follow_gap_node error: {e}')
        exit(1)


if __name__ == '__main__':
    main()
"rospy.logwarn",
"rospy.logerr",
"rospy.Subscriber",
"numpy.abs",
"rospy.wait_for_message",
"numpy.argmax",
"rospy.Publisher",
"geometry_msgs.msg.Twist",
"numpy.clip",
"time.sleep",
"numpy.sin",
"numpy.array",
"rospy.init_node",
"numpy.cos",
"rospy.get_name",
"rospy.spin",
"rospy.Dur... | [((274, 314), 'rospy.init_node', 'rospy.init_node', (['"""laser_follow_gap_node"""'], {}), "('laser_follow_gap_node')\n", (289, 314), False, 'import rospy\n'), ((770, 818), 'rospy.Publisher', 'rospy.Publisher', (['"""cmd_vel"""', 'Twist'], {'queue_size': '(10)'}), "('cmd_vel', Twist, queue_size=10)\n", (785, 818), False, 'import rospy\n'), ((852, 916), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/laser/scan"""', 'LaserScan', 'self._laser_callback'], {}), "('/laser/scan', LaserScan, self._laser_callback)\n", (868, 916), False, 'import rospy\n'), ((973, 980), 'geometry_msgs.msg.Twist', 'Twist', ([], {}), '()\n', (978, 980), False, 'from geometry_msgs.msg import Twist\n'), ((2350, 2371), 'numpy.array', 'np.array', (['scan.ranges'], {}), '(scan.ranges)\n', (2358, 2371), True, 'import numpy as np\n'), ((2477, 2502), 'numpy.argmax', 'np.argmax', (['ranges'], {'axis': '(0)'}), '(ranges, axis=0)\n', (2486, 2502), True, 'import numpy as np\n'), ((2678, 2721), 'numpy.clip', 'np.clip', (['self._longest_beam_distance', '(0)', '(25)'], {}), '(self._longest_beam_distance, 0, 25)\n', (2685, 2721), True, 'import numpy as np\n'), ((2908, 2920), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (2918, 2920), False, 'import rospy\n'), ((1058, 1106), 'rospy.wait_for_message', 'rospy.wait_for_message', (['"""/laser/scan"""', 'LaserScan'], {}), "('/laser/scan', LaserScan)\n", (1080, 1106), False, 'import rospy\n'), ((1120, 1164), 'rospy.logwarn', 'rospy.logwarn', (['f"""awaiting /laser/scan topic"""'], {}), "(f'awaiting /laser/scan topic')\n", (1133, 1164), False, 'import rospy\n'), ((1177, 1190), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1187, 1190), False, 'import time\n'), ((1323, 1343), 'rospy.Duration', 'rospy.Duration', (['(0.05)'], {}), '(0.05)\n', (1337, 1343), False, 'import rospy\n'), ((1663, 1695), 'numpy.sin', 'np.sin', (['self._longest_beam_angle'], {}), '(self._longest_beam_angle)\n', (1669, 1695), True, 'import numpy as np\n'), ((1697, 1729), 
'numpy.cos', 'np.cos', (['self._longest_beam_angle'], {}), '(self._longest_beam_angle)\n', (1703, 1729), True, 'import numpy as np\n'), ((1880, 1902), 'numpy.abs', 'np.abs', (['smallest_angle'], {}), '(smallest_angle)\n', (1886, 1902), True, 'import numpy as np\n'), ((2956, 3005), 'rospy.logerr', 'rospy.logerr', (['f"""laser_follow_gap_node error: {e}"""'], {}), "(f'laser_follow_gap_node error: {e}')\n", (2968, 3005), False, 'import rospy\n'), ((2776, 2815), 'numpy.abs', 'np.abs', (['(scan.angle_max - scan.angle_min)'], {}), '(scan.angle_max - scan.angle_min)\n', (2782, 2815), True, 'import numpy as np\n'), ((1431, 1447), 'rospy.get_name', 'rospy.get_name', ([], {}), '()\n', (1445, 1447), False, 'import rospy\n')] |
import numpy as np
import h5py
import os
import illustris_python as il
import matplotlib.pyplot as plt
# snap_num = 99
# snap_num = 99
# IDs of subhalos classified as disk galaxies
diskID = np.load('F:/Linux/data/diskID.npy')
# Stellar (particle type 4) mass of every subhalo, code units of 1e10 Msun
StellarMass = il.groupcat.loadSubhalos('F:/Linux/data/TNG/Groupcatalog', 99, 'SubhaloMassType')[:, 4]

# load barred galaxies' IDs (two bar-size samples combined)
bigID = np.load('F:/Linux/data/bigID.npy')
smallID = np.load('F:/Linux/data/smallID.npy')
ids = np.concatenate((smallID, bigID))

# Barred galaxies' stellar mass in log10(M*/Msun)
halomass = np.log10(StellarMass[ids] * 10**10)
# BUG FIX: the log10 disk masses were previously assigned back to
# `StellarMass`, while the binning loop below kept comparing the *linear*
# `diskmass` against log-spaced bins -- every bin was empty, crashing with
# ZeroDivisionError.  Keep the logged values in `diskmass` instead.
diskmass = np.log10(StellarMass[diskID] * 10**10)

# Create figure: bar fraction (left axis) over barred-galaxy histogram (right axis)
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.set_xlabel('Stellar Mass')
ax1.set_ylabel('Bar Fraction')
ax2 = ax1.twinx()
ax2.set_ylabel('Halo number N')

# Histogram of barred-galaxy masses in 20 bins
n, bins, others = ax2.hist(halomass, 20, rwidth=0.9)
ax2.set_xlim(9.8, 12)

Fraction = []
x_point = []
for i in range(20):
    low = bins[i]
    high = bins[i + 1]
    x_point.append((low + high) / 2)
    disknum = len(diskmass[(diskmass >= low) & (diskmass < high)])
    barred = len(halomass[(halomass >= low) & (halomass < high)])
    # Guard against empty mass bins instead of dividing by zero
    Barfraction = barred / disknum if disknum else 0.0
    Fraction.append(Barfraction)

ax1.plot(x_point, Fraction, 'o', c='r')
plt.show()  # display the figure when run as a script
| [
"numpy.load",
"illustris_python.groupcat.loadSubhalos",
"matplotlib.pyplot.figure",
"numpy.log10",
"numpy.concatenate"
] | [((138, 173), 'numpy.load', 'np.load', (['"""F:/Linux/data/diskID.npy"""'], {}), "('F:/Linux/data/diskID.npy')\n", (145, 173), True, 'import numpy as np\n'), ((312, 346), 'numpy.load', 'np.load', (['"""F:/Linux/data/bigID.npy"""'], {}), "('F:/Linux/data/bigID.npy')\n", (319, 346), True, 'import numpy as np\n'), ((358, 394), 'numpy.load', 'np.load', (['"""F:/Linux/data/smallID.npy"""'], {}), "('F:/Linux/data/smallID.npy')\n", (365, 394), True, 'import numpy as np\n'), ((402, 434), 'numpy.concatenate', 'np.concatenate', (['(smallID, bigID)'], {}), '((smallID, bigID))\n', (416, 434), True, 'import numpy as np\n'), ((499, 528), 'numpy.log10', 'np.log10', (['(halomass * 10 ** 10)'], {}), '(halomass * 10 ** 10)\n', (507, 528), True, 'import numpy as np\n'), ((572, 601), 'numpy.log10', 'np.log10', (['(diskmass * 10 ** 10)'], {}), '(diskmass * 10 ** 10)\n', (580, 601), True, 'import numpy as np\n'), ((622, 634), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (632, 634), True, 'import matplotlib.pyplot as plt\n'), ((189, 274), 'illustris_python.groupcat.loadSubhalos', 'il.groupcat.loadSubhalos', (['"""F:/Linux/data/TNG/Groupcatalog"""', '(99)', '"""SubhaloMassType"""'], {}), "('F:/Linux/data/TNG/Groupcatalog', 99,\n 'SubhaloMassType')\n", (213, 274), True, 'import illustris_python as il\n')] |
# pylint: disable=E1101,C1801,C0103
"""Defines the GUI IO file for Nastran."""
from __future__ import annotations
import os
import sys
import traceback
from itertools import chain
from io import StringIO
from collections import defaultdict, OrderedDict
from typing import List, Dict, Tuple, Any, TYPE_CHECKING
#VTK_TRIANGLE = 5
#VTK_QUADRATIC_TRIANGLE = 22
#VTK_QUAD = 9
#VTK_QUADRATIC_QUAD = 23
#VTK_TETRA = 10
#VTK_QUADRATIC_TETRA = 24
#VTK_WEDGE = 13
#VTK_QUADRATIC_WEDGE = 26
#VTK_HEXAHEDRON = 12
#VTK_QUADRATIC_HEXAHEDRON = 25
import numpy as np
from numpy.linalg import norm # type: ignore
#: makes vtk work on certain builds of vtk
#: we have to call this before vtk; you can't just try-except it
#: unused_import
from pyNastran.gui.qt_version import qt_version
if qt_version == 'pyqt5':
import PyQt5
elif qt_version == 'pyside2':
import PySide2
else:
raise NotImplementedError(qt_version)
from qtpy import QtCore
from qtpy.QtWidgets import QDockWidget
import vtk
from vtk import (vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron,
vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra,
vtkQuadraticWedge, vtkQuadraticHexahedron,
vtkPyramid) #vtkQuadraticPyramid
#from pyNastran import is_release
from pyNastran import __version__
from pyNastran.utils.numpy_utils import integer_types
from pyNastran.femutils.nan import (
isfinite, isfinite_and_greater_than, isfinite_and_nonzero,
isgreater_int)
from pyNastran.femutils.utils import duplicates, is_monotonic, underflow_norm
from pyNastran.bdf.bdf import (BDF,
CAERO1, CAERO2, CAERO3, CAERO4, CAERO5,
CQUAD4, CQUAD8, CQUAD, CQUADR, CSHEAR,
CTRIA3, CTRIA6, CTRIAR,
CPLSTN3, CPLSTN4, CPLSTN6, CPLSTN8,
CPLSTS3, CPLSTS4, CPLSTS6, CPLSTS8,
CTRAX3, CTRIAX6, CTRIAX, #CTRAX6,
CQUADX4, CQUADX8, CQUADX,
CONM2)
from pyNastran.bdf.cards.aero.zona import CAERO7, BODY7
from pyNastran.bdf.cards.elements.solid import (
CTETRA4, CTETRA10, CPENTA6, CPENTA15,
CHEXA8, CHEXA20, CIHEX1, CIHEX2,
CPYRAM5, CPYRAM13,
)
from pyNastran.bdf.mesh_utils.delete_bad_elements import (
tri_quality, quad_quality, get_min_max_theta)
from pyNastran.bdf.mesh_utils.export_mcids import export_mcids_all
from pyNastran.bdf.mesh_utils.forces_moments import get_load_arrays, get_pressure_array
from pyNastran.bdf.mesh_utils.mpc_dependency import get_mpc_node_ids
from pyNastran.op2.op2 import OP2
#from pyNastran.f06.f06_formatting import get_key0
from pyNastran.op2.op2_geom import OP2Geom
from pyNastran.op2.result_objects.stress_object import StressObject
from pyNastran.gui.utils.vtk.base_utils import numpy_to_vtk, numpy_to_vtkIdTypeArray
from pyNastran.gui.utils.vtk.vtk_utils import (
get_numpy_idtype_for_vtk, numpy_to_vtk_points, create_vtk_cells_of_constant_element_type)
from pyNastran.gui.qt_files.colors import (
RED_FLOAT, BLUE_FLOAT, GREEN_FLOAT, LIGHT_GREEN_FLOAT, PINK_FLOAT, PURPLE_FLOAT,
YELLOW_FLOAT, ORANGE_FLOAT)
from pyNastran.gui.errors import NoGeometry, NoSuperelements
from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult
from pyNastran.gui.gui_objects.displacements import ForceTableResults, ElementalTableResults
from .wildcards import IS_H5PY, GEOM_METHODS_BDF
from .beams3d import get_bar_nids, get_beam_sections_map, create_3d_beams
from .geometry_helper import NastranGeometryHelper, get_material_arrays, get_suport_node_ids
from .results_helper import NastranGuiResults, fill_responses, _get_times
from .bdf_vectorized import add_vectorized_elements
from .utils import (
build_offset_normals_dims, build_map_centroidal_result,
get_nastran_gui_layer_word, check_for_missing_control_surface_boxes,
get_elements_nelements_unvectorized, get_shell_material_coord,
make_nid_map, store_warning)
from .menus.setup_model_sidebar import ModelSidebar
if TYPE_CHECKING: # pragma: no cover
from pyNastran.gui.gui_objects.settings import Settings
# Face connectivity for solid elements: element type -> {face id: corner-node
# indices (1-based) of that face}.  Currently only CHEXA is defined.
SIDE_MAP = {}
SIDE_MAP['CHEXA'] = {
    1 : [4, 3, 2, 1],
    2 : [1, 2, 6, 5],
    3 : [2, 3, 7, 6],
    4 : [3, 4, 8, 7],
    5 : [4, 1, 5, 8],
    6 : [5, 6, 7, 8],
}
# Element types that carry no theta/material-coordinate angle
# (presumably skipped when building theta results -- confirm at use sites)
NO_THETA = [
    'CELAS1', 'CELAS2', 'CELAS3', 'CELAS4',
    'CDAMP1', 'CDAMP2', 'CDAMP3', 'CDAMP4', 'CDAMP5',
    'CBAR', 'CBEAM', 'CBEAM3', 'CBEND',
    'CBUSH', 'CBUSH1D', 'CBUSH2D', 'CVISC',
    'CONROD', 'CROD', 'CTUBE', 'PLOTEL',
    'CHBDYP', 'GENEL',
]
# OP2 result attribute names the GUI knows how to display.
DESIRED_RESULTS = [
    # nodal
    # ---------
    'displacements', 'velocities', 'accelerations', 'temperatures',
    'constraint_forces', 'spc_forces', 'mpc_forces', 'eigenvectors',
    'contact_forces', 'glue_forces',

    #'gridPointForces',
    #'stress',

    # untested
    'load_vectors',
    'applied_loads',
    'force_vectors',

    # ---------
    # centroidal
    'stress',
    'chexa_stress', 'cpenta_stress', 'ctetra_stress',

    # TODO(review): 'ctria3_stress' is listed twice; the second entry was
    # probably meant to be 'ctria6_stress' -- confirm before changing
    'ctria3_stress', 'ctria3_stress',
    # BUG FIX: a missing comma caused implicit string concatenation into the
    # single bogus name 'cquad8_stresscquad4_stress'
    'cquad8_stress', 'cquad4_stress',
    'ctria3_composite_stress', 'ctria3_composite_stress',
    # BUG FIX: missing comma (implicit string concatenation), as above
    'cquad8_composite_stress', 'cquad4_composite_stress',

    'cbar_stress', 'cbeam_stress',
    'crod_stress', 'conrod_stress', 'ctube_stress',
    'celas1_stress', 'celas2_stress', 'celas3_stress', 'celas4_stress',
    #=================================================
    'strain',
    # BUG FIX: typo 'ctetra_strein' -> 'ctetra_strain'
    'chexa_strain', 'cpenta_strain', 'ctetra_strain',

    'ctria3_strain', 'ctria3_strain',
    'cquad8_strain', 'cquad4_strain',
    'ctria3_composite_strain', 'ctria3_composite_strain',
    'cquad8_composite_strain', 'cquad4_composite_strain',

    'cbar_strain', 'cbeam_strain',
    'crod_strain', 'conrod_strain', 'ctube_strain',
    'celas1_strain', 'celas2_strain', 'celas3_strain', 'celas4_strain',
]
# True when running under a test harness (e.g. pytest); used to gate GUI-only paths
IS_TESTING = 'test' in sys.argv[0]
class NastranIO(NastranGuiResults, NastranGeometryHelper):
"""Defines the GUI class for Nastran."""
def __init__(self):
super(NastranIO, self).__init__()
self.nid_release_map = {}
self.make_spc_mpc_supports = True
#def __init__(self, gui):
#super(NastranIO, self).__init__()
#self.gui = gui # make sure to comment out the property on line 124
#self.nid_release_map = {}
#self.stress = {}
#self.strain = {}
def get_nastran_wildcard_geometry_results_functions(self):
"""gets the Nastran wildcard loader used in the file load menu"""
geom_methods_pch = 'Nastran Geometry - Punch (*.bdf; *.dat; *.nas; *.ecd; *.pch)'
combined_methods_op2 = 'Nastran Geometry + Results - OP2 (*.op2)'
results_fmts = ['Nastran OP2 (*.op2)',]
if IS_H5PY:
results_fmts.append('pyNastran H5 (*.h5)')
results_fmts.append('Patran nod (*.nod)')
results_fmt = ';;'.join(results_fmts)
#results_fmt = 'Nastran OP2 (*.op2)'
data_geom = (
'nastran',
GEOM_METHODS_BDF, self.load_nastran_geometry,
results_fmt, self.load_nastran_results)
data_geom_pch = (
'nastran',
geom_methods_pch, self.load_nastran_geometry,
results_fmt, self.load_nastran_results)
unused_data_geom_results = (
'nastran',
combined_methods_op2, self.load_nastran_geometry_and_results,
results_fmt, self.load_nastran_results)
return [data_geom, data_geom_pch]
#return [data_geom, data_geom_pch, data_geom_results]
    def load_nastran_geometry_and_results(self, op2_filename, name='main', plot=True):
        """loads geometry and results, so you don't have to double define the same BDF/OP2"""
        # NOTE(review): the `name` and `plot` arguments are accepted but not
        # forwarded; the geometry pass is always name='main', plot=False -- confirm
        self.load_nastran_geometry(op2_filename, name='main', plot=False)
        self.load_nastran_results(self.model)  # name='main', plot=True
    def _cleanup_nastran_tools_and_menu_items(self):
        """
        hides the Nastran toolbar when loading another format
        """
        # hide the Tools menu, the toolbar, and its show/hide action
        self.nastran_tools_menu.setVisible(False)
        #self.menu_help.menuAction().setVisible(True)
        #self.menu_help2.menuAction().setVisible(False)
        self.nastran_toolbar.setVisible(False)
        self.actions['nastran'].setVisible(False)
    def _create_nastran_tools_and_menu_items(self):
        """
        creates the Nastran toolbar when loading a Nastran file

        Returns
        -------
        tools : list
            currently empty; all entries are commented out
        menu_items : dict
            maps 'nastran_toolbar' to (toolbar, action names)
        """
        tools = [
            #('about_nastran', 'About Nastran GUI', 'tabout.png', 'CTRL+H',
            #'About Nastran GUI and help on shortcuts', self.about_dialog),
            #('about', 'About Orig GUI', 'tabout.png', 'CTRL+H',
            #'About Nastran GUI and help on shortcuts', self.about_dialog),
        ]
        #self.gui.menu_help2 = self.gui.menubar.addMenu('&HelpMenuNew')
        #self.gui.menu_help.menuAction().setVisible(False)
        # if the toolbar already exists (a Nastran model was loaded before),
        # just re-show it; otherwise build it from scratch
        if hasattr(self, 'nastran_toolbar'):
            self.nastran_tools_menu.setVisible(True)
            self.gui.nastran_toolbar.setVisible(True)
            self.gui.actions['nastran'].setVisible(True)
        else:
            #self.menubar.addMenu('&File')
            self.create_nastran_tools_menu(self.gui)
            self.gui.nastran_toolbar = self.addToolBar('Nastran Toolbar')
            self.gui.nastran_toolbar.setObjectName('nastran_toolbar')
            #self.gui.nastran_toolbar.setStatusTip("Show/Hide nastran toolbar")
            self.gui.actions['nastran'] = self.nastran_toolbar.toggleViewAction()
            self.gui.actions['nastran'].setStatusTip("Show/Hide application toolbar")
        #self.gui.file.menuAction().setVisible(False)
        #self.gui.menu_help.

        #self.gui.actions['about'].Disable()
        menu_items = {}
        menu_items['nastran_toolbar'] = (self.gui.nastran_toolbar,
                                         ('caero', 'caero_subpanels', 'conm2'))
        #menu_items = [
            #(self.menu_help2, ('about_nastran',)),
            #(self.gui.nastran_toolbar, ('caero', 'caero_subpanels', 'conm2'))
            #(self.menu_window, tuple(menu_window)),
            #(self.menu_help, ('load_geometry', 'load_results', 'script', '', 'exit')),
            #(self.menu_help2, ('load_geometry', 'load_results', 'script', '', 'exit')),
        return tools, menu_items
    def on_create_coord(self):
        """Callback for the 'Create Coordinate System...' menu action; not implemented yet."""
        pass
    def create_nastran_tools_menu(self, gui):
        """Adds the Nastran 'Tools' menu (shear/moment/torque, create coord) to the menubar."""
        #if 'dev' not in __version__:
            #return
        # the menu only makes sense when the shear/moment/torque handler exists
        if not hasattr(self, 'shear_moment_torque_obj'):
            return
        # (name, text, icon, shortcut, tip, callback) tuples
        tools = [
            #('script', 'Run Python Script...', 'python48.png', None, 'Runs pyNastranGUI in batch mode', self.on_run_script),
            ('shear_moment_torque', 'Shear, Moment, Torque...', 'python48.png', None,
             'Creates a Shear, Moment, Torque Plot', self.shear_moment_torque_obj.set_shear_moment_torque_menu),
            ('create_coord', 'Create Coordinate System...', 'coord.png', None, 'Creates a Coordinate System', self.on_create_coord),
        ]
        items = (
            'shear_moment_torque',
            'create_coord',
        )
        nastran_tools_menu = gui.menubar.addMenu('Tools')
        gui.nastran_tools_menu = nastran_tools_menu
        menu_items = {
            'nastran_tools' : (nastran_tools_menu, items),
        }
        icon_path = ''
        # register the actions, then attach them to the new menu
        gui._prepare_actions_helper(icon_path, tools, self.actions, checkables=None)
        gui._populate_menu(menu_items, actions=self.actions)
def toggle_caero_panels(self):
"""
Toggle the visibility of the CAERO panels. The visibility of the
sub panels or panels will be set according to the current
show_caero_sub_panels state.
"""
if not self.has_caero:
return
self.show_caero_actor = not self.show_caero_actor
names = ['caero', 'caero_subpanels', 'caero_control_surfaces']
geometry_properties = self.gui._get_geometry_properties_by_name(names)
if self.show_caero_actor:
try:
geometry_properties['caero_control_surfaces'].is_visible = True
except KeyError:
pass
if self.show_caero_sub_panels:
geometry_properties['caero_subpanels'].is_visible = True
else:
geometry_properties['caero'].is_visible = True
else:
try:
geometry_properties['caero_control_surfaces'].is_visible = False
except KeyError:
pass
geometry_properties['caero'].is_visible = False
geometry_properties['caero_subpanels'].is_visible = False
self.gui.on_update_geometry_properties_override_dialog(geometry_properties)
def _get_geometry_properties_by_name(self, names):
"""
Get a subset of the self.geometry_properties dict specified by
names. Any names not in the dict will be ignored.
Parameters
-----------
names : list [str, ...]
List of names.
Returns
--------
geometry_properties : dict {str : AltGeometry or CoordProperties}
Dictonairy from name to property object.
"""
geometry_properties = {}
for name in names:
try:
prop = self.gui.geometry_properties[name]
except KeyError:
continue
geometry_properties[name] = prop
return geometry_properties
def on_update_geometry_properties_window(self, geometry_properties):
"""updates the 'Edit Geometry Properties' window"""
self.gui.on_update_geometry_properties_window(geometry_properties)
def toggle_caero_sub_panels(self):
"""
Toggle the visibility of the CAERO sub panels
"""
if not self.has_caero:
return
names = ['caero', 'caero_subpanels']
geometry_properties = self.gui._get_geometry_properties_by_name(names)
self.show_caero_sub_panels = not self.show_caero_sub_panels
if self.show_caero_actor:
if self.show_caero_sub_panels:
geometry_properties['caero'].is_visible = False
geometry_properties['caero_subpanels'].is_visible = True
else:
geometry_properties['caero'].is_visible = True
geometry_properties['caero_subpanels'].is_visible = False
self.gui.on_update_geometry_properties_override_dialog(geometry_properties)
def toggle_conms(self):
"""
Toggle the visibility of the CONMS
"""
name = 'conm2'
if name in self.gui.geometry_actors:
geometry_properties_change = {name : self.gui.geometry_properties[name]}
visibility_prev = geometry_properties_change[name].is_visible
geometry_properties_change[name].is_visible = not visibility_prev
self.gui.on_update_geometry_properties_override_dialog(geometry_properties_change)
def _create_coord(self, dim_max, cid, coord, coord_type):
"""
Create a coordinate system
Parameters
----------
dim_max : float
the max model dimension; 10% of the max will be used for the
coord length
cid : int
the coordinate system id
coord : Coord()
the Nastran coord object
coord_type : str
a string of 'xyz', 'Rtz', 'Rtp' (xyz, cylindrical, spherical)
that changes the axis names
"""
origin = coord.origin
beta = coord.beta().T
## TODO: support FEMAP syntax
self.gui.create_coordinate_system(
cid, dim_max, label='%s' % cid, origin=origin,
matrix_3x3=beta, coord_type=coord_type)
def _create_nastran_coords(self, model, dim_max):
"""
Creates the Nastran coordinate systems.
Parameters
----------
model : BDF()
the BDF object
dim_max : float
the max model dimension; 10% of the max will be used for the
coord length
"""
cid_types = {
'R' : 'xyz',
'C' : 'Rtz',
'S' : 'Rtp',
}
self.gui.create_global_axes(dim_max)
if not self.gui.settings.nastran_create_coords:
return
for cid, coord in sorted(model.coords.items()):
if cid in [0, -1]:
continue
cid_type = cid_types[coord.Type]
self.gui._create_coord(dim_max, cid, coord, cid_type)
def _remove_old_nastran_geometry(self, bdf_filename):
"""cleans up the nastran model"""
#return self._remove_old_geometry(bdf_filename)
# skip_reading = self.removeOldGeometry(bdf_filename)
skip_reading = False
if bdf_filename is None or bdf_filename == '':
#self.grid = vtk.vtkUnstructuredGrid()
#self.scalar_bar_actor.VisibilityOff()
skip_reading = True
return skip_reading
else:
self.gui.turn_text_off()
self.gui.grid.Reset()
#self.gui.eid_map = {}
#self.gui.nid_map = {}
self.gui.result_cases = {}
self.gui.ncases = 0
# TODO: is this doing anything?
for name in ('case_keys', 'icase', 'isubcase_name_map'):
if hasattr(self, name):
del name
return skip_reading
    def get_xyz_in_coord(self, model, cid=0, fdtype: str='float32', check_mirror: bool=True):
        """
        Creates the grid points efficiently

        Used by ``load_nastran_geometry_unvectorized``

        Parameters
        ----------
        model : BDF()
            the geometry model (may contain superelements)
        cid : int; default=0
            the coordinate frame the xyz points are returned in
        fdtype : str; default='float32'
            the float dtype of the xyz array
        check_mirror : bool; default=True
            when superelement node ids collide, allow one
            renumber-and-reload attempt (False on the retry, which
            prevents infinite recursion)

        Returns
        -------
        xyz_cid0 : (nnodes, 3) float ndarray
            the node locations
        nid_cp_cd : (nnodes, 3) int ndarray
            node id / cp coord / cd coord per node
        """
        xyz_cid0, nid_cp_cd, icd_transform = build_superelement_model(model, cid=cid, fdtype=fdtype)
        if len(xyz_cid0) == 1:
            # only the main model (super_id=0); no superelement handling needed
            super_id = 0
            nid_mapi = self.gui.nid_map
            make_nid_map(nid_mapi, nid_cp_cd[super_id][:, 0])
            self._add_nastran_spoints_to_grid(model.spoints, nid_mapi)
            self.icd_transform = icd_transform[super_id]
            return xyz_cid0[super_id], nid_cp_cd[super_id]
        # superelements
        self.icd_transform = icd_transform
        # stack the per-superelement arrays into flat arrays (sorted by super_id)
        xyz_cid0_full = []
        nid_cp_cd_full = []
        for super_id, xyz_cid0i in sorted(xyz_cid0.items()):
            xyz_cid0_full.append(xyz_cid0[super_id])
            nid_cp_cd_full.append(nid_cp_cd[super_id])
        xyz_cid0_out = np.vstack(xyz_cid0_full)
        nid_cp_cd_out = np.vstack(nid_cp_cd_full)
        all_nids = nid_cp_cd_out[:, 0]
        unids = np.unique(all_nids)
        log = self.log
        if not len(all_nids) == len(unids):
            # duplicate node ids across superelements
            if model.sebulk and check_mirror:
                # renumber the deck to make the ids unique, write it out,
                # read it back, splice the renumbered bulk data into
                # ``model`` and retry once (check_mirror=False)
                from pyNastran.bdf.mesh_utils.bdf_renumber import superelement_renumber
                bdf_filename_out = 'spike.bdf'
                unused_model = superelement_renumber(
                    model, bdf_filename_out=bdf_filename_out,
                    size=8, is_double=False, starting_id_dict=None,
                    cards_to_skip=None, log=None, debug=False)
                _model2 = BDF(debug=None, log=log, mode='msc')
                _model2.read_bdf(bdf_filename=bdf_filename_out,
                                 validate=False, xref=False, punch=False, read_includes=True,
                                 save_file_structure=False, encoding=model._encoding)
                model.uncross_reference()
                model.nodes = _model2.nodes
                model.elements = _model2.elements
                model.properties = _model2.properties
                model.materials = _model2.materials
                model.loads = _model2.loads
                model.seloc = _model2.seloc
                model.superelement_models = _model2.superelement_models
                #model.write_bdf('spike2.bdf')
                #os.remove('spike2.bdf')
                xref_nodes = True
                xref_loads = True
                model.safe_cross_reference(
                    xref=True,
                    xref_nodes=xref_nodes,
                    xref_elements=True,
                    xref_nodes_with_elements=False,
                    xref_properties=True,
                    xref_masses=True,
                    xref_materials=False,
                    xref_loads=xref_loads,
                    xref_constraints=False,
                    xref_optimization=False,
                    xref_aero=True,
                    xref_sets=False,
                    create_superelement_geometry=False,
                )
                #from pyNastran.bdf.mesh_utils.bdf_renumber import (
                    #bdf_renumber, get_starting_ids_dict_from_mapper)
                #starting_id_dict = {  # todo: hardcoded
                    #'nid' : unids.max(),
                    #'eid' : 100000,
                    #'cid' : 100000,
                    #'pid' : 100000,
                #}
                #for seid, sebulk in sorted(model.sebulk.items()):
                    #if sebulk.Type == 'MIRROR':
                        #print('renumbering mirror seid=%s -> %s' % (sebulk.rseid, seid))
                        #superelement = model.superelement_models[seid]
                        #bdf_filename_out = 'super_%i.bdf' % seid
                        #_model, mapper = bdf_renumber(
                            #superelement, bdf_filename_out, size=8, is_double=False,
                            #starting_id_dict=starting_id_dict, round_ids=False,
                            #cards_to_skip=None, log=log, debug=False)
                        #starting_id_dict = get_starting_ids_dict_from_mapper(
                            #_model, mapper)
                        #superelement2 = BDF(debug=True, log=log, mode='msc')
                        #superelement2.read_bdf(bdf_filename_out)
                        #model.superelement_models[seid] = superelement2
                        ##os.remove(bdf_filename_out)
                    #else:  # pragma: no cover
                        #raise NotImplementedError(sebulk)
                #model.write_bdf('spike.bdf')
                return self.get_xyz_in_coord(model, cid=0, fdtype=fdtype, check_mirror=False)
            msg = ('superelement nodes are not unique; use superelement_renumber\n'
                   'renumbering; duplicate nids=\n%s' % duplicates(all_nids))
            raise NotImplementedError(msg)
        if not is_monotonic(all_nids):
            #msg = ('superelement nodes are not monotonic; use superelement_renumber\n'
                   #'renumbering; nids=\n%s' % all_nids)
            #self.log.warning(msg)
            # sort the stacked arrays by node id so downstream lookups work
            isort = np.argsort(all_nids)
            xyz_cid0_out = xyz_cid0_out[isort, :]
            nid_cp_cd_out = nid_cp_cd_out[isort, :]
        make_nid_map(self.gui.nid_map, nid_cp_cd_out[:, 0])
        return xyz_cid0_out, nid_cp_cd_out
def get_xyz_in_coord_vectorized(self, model, cid=0, fdtype='float32'):
"""
Creates the grid points efficiently
Used by ``load_nastran_geometry_vectorized``
"""
xyz_cid0 = None
nid_cp_cd = None
if self.gui.nnodes > 0:
#xyz_cid0 = {}
#nid_cp_cd = {}
out = model.get_displacement_index_xyz_cp_cd(
fdtype=fdtype, idtype='int32')
icd_transform, icp_transform, xyz_cp, nid_cp_cd = out
self.icd_transform = icd_transform
#print("transform_xyzcp_to_xyz_cid")
#model.nodes.cp = nid_cp_cd[:, 1]
xyz_cid0 = model.transform_xyzcp_to_xyz_cid(
xyz_cp, nid_cp_cd[:, 0], icp_transform, cid=cid,
in_place=False)
model.nodes.xyz_cid0 = xyz_cid0
model.nodes.nids = nid_cp_cd[:, 0]
nid_map = self.gui.nid_map
for i, nid in enumerate(nid_cp_cd[:, 0]):
nid_map[nid] = i
self._add_nastran_spoints_to_grid(model.spoints, nid_map)
return xyz_cid0, nid_cp_cd
    def _get_model_unvectorized(self, bdf_filename, xref_loads=True):
        """
        Loads the BDF/OP2 geometry

        Parameters
        ----------
        bdf_filename : str or BDF
            the filename to load (.bdf/.pch/.op2/.h5/.obj) or an
            already-built BDF object (returned as-is)
        xref_loads : bool; default=True
            should the loads be cross-referenced

        Returns
        -------
        model : BDF / OP2Geom
            the loaded, cross-referenced model
        xref_nodes : bool
            were the nodes cross-referenced (always True here)
        """
        ext = '.bdf'
        if isinstance(bdf_filename, str):
            ext = os.path.splitext(bdf_filename)[1].lower()
        elif isinstance(bdf_filename, BDF):
            # caller passed an in-memory model; assume it is already xref'd
            model = bdf_filename
            xref_nodes = True
            return model, xref_nodes
        punch = False
        if ext == '.pch':
            punch = True
        log = self.gui.log
        self.model_type = 'nastran'
        if ext == '.op2':
            # the op2 carries its own geometry tables; results are dropped
            model = OP2Geom(make_geom=True, debug=False, log=log,
                            debug_file=None)
            model.clear_results()
            model.IS_TESTING = False
            model.read_op2(op2_filename=bdf_filename)
        elif ext == '.h5' and IS_H5PY:
            model = BDF(log=log, debug=True)
            model.load_hdf5_filename(bdf_filename)
            model.validate()
        elif ext == '.obj':
            # a pickled BDF object
            model = BDF(log=log, debug=True)
            model.load(obj_filename=bdf_filename)
        else:  # read the bdf/punch
            model = BDF(log=log, debug=True)
            model.read_bdf(bdf_filename,
                           punch=punch, xref=False,
                           validate=True)
            #print('done with read_bdf')
        #xref_loads = False
        #xref_aero = len(model.caeros) > 0
        xref_nodes = True
        #model.cross_reference()
        # link only the pieces the GUI needs; the safe_* variant is used
        # so a slightly broken deck can still be drawn
        model.safe_cross_reference(
            xref=True,
            xref_nodes=xref_nodes,
            xref_elements=True,
            xref_nodes_with_elements=False,
            xref_properties=True,
            xref_masses=True,
            xref_materials=False,
            xref_loads=xref_loads,
            xref_constraints=False,
            xref_optimization=False,
            xref_aero=True,
            xref_sets=False,
            create_superelement_geometry=True,
        )
        return model, xref_nodes
    def load_nastran_geometry(self, bdf_filename, name='main', plot=True, **kwargs):
        """
        The entry point for Nastran geometry loading.

        Parameters
        ----------
        bdf_filename : varies
            str: the Nastran filename to load
            model : the BDF object
        name : str
            the name of the "main" actor for the GUI
        plot : bool; default=True
            should the model be generated or should we wait until
            after the results are loaded

        kwargs:
        -------
        is_geometry_results : bool; default=True
            code is being called from load_nastran_geometry_and_results
            not used...
        """
        # reset per-model maps before (re)loading
        self.gui.eid_maps[name] = {}
        self.gui.nid_maps[name] = {}
        self.icd_transform = {}
        #self.transforms = {}
        #print('bdf_filename=%r' % bdf_filename)
        #key = self.case_keys[self.icase]
        #case = self.result_cases[key]
        skip_reading = self._remove_old_nastran_geometry(bdf_filename)
        # if 0:
        #     line_width = 3
        #     opacity = 1
        #     alt_grids = [
        #         ['caero', yellow, line_width, opacity],
        #         ['caero_subpanels', yellow, line_width, opacity],
        #     ]
        #     skip_reading = self._remove_old_geometry2(bdf_filename, alt_grids=alt_grids)
        if skip_reading:
            return
        #load_geom = True
        # the vectorized loader is only exercised in testing mode; it
        # falls back to the unvectorized loader when superelements exist
        if isinstance(bdf_filename, str) and bdf_filename.lower().endswith(('.bdf', '.dat', '.pch',)):  # '.op2'
            # if we're running test_pynastrangui or we have the --test flag on the command line
            # this has (technically) nothing to do with if we're running the tests or not
            if IS_TESTING or self.gui.is_testing_flag:
                try:
                    self.load_nastran_geometry_vectorized(bdf_filename, plot=plot)
                except NoSuperelements:
                    self.log.error('\n' + traceback.format_exc())
                    self.load_nastran_geometry_unvectorized(bdf_filename, plot=plot)
            else:
                self.load_nastran_geometry_unvectorized(bdf_filename, plot=plot)
                #self.load_nastran_geometry_vectorized(bdf_filename, plot=plot)
        else:
            self.load_nastran_geometry_unvectorized(bdf_filename, plot=plot)
        self.gui.format = 'nastran'
    def load_nastran_geometry_vectorized(self, bdf_filename, plot=True):
        """
        The entry point for Nastran geometry loading.

        Parameters
        ----------
        bdf_filename : str
            the Nastran filename to load
        plot : bool; default=True
            should the model be generated or should we wait until
            after the results are loaded

        Raises
        ------
        NoGeometry
            if the model has no nodes/elements to draw
        NoSuperelements
            if the deck contains superelements (unsupported here)
        """
        model_name = 'main'
        #self.isubcase_name_map[None] = ['a', 'b']
        reset_labels = True
        if plot:
            self.gui.scalar_bar_actor.VisibilityOff()
            self.gui.scalar_bar_actor.Modified()
        model = self._get_model_vectorized(bdf_filename)
        # count the GUI nodes (grids + scalar/extra points + gridb)
        nnodes = len(model.grid)
        nspoints = len(model.spoints)
        nepoints = len(model.epoints)
        ncaero_cards = len(model.caeros)
        ngridb = len(model.gridb)
        #if model.spoints:
            #spoints = sorted([spoint.nid for spoint in model.spoints.values()])
        #if model.epoints:
            #epoints = sorted([epoint.nid for epoint in model.epoints.values()])
        ngui_nodes = nnodes + nspoints + nepoints + ngridb
        if ngui_nodes + ncaero_cards == 0:
            msg = 'nnodes + nspoints + nepoints = 0\n'
            msg += 'card_count = %r' % str(model.card_count)
            raise NoGeometry(msg)
        nelements2 = len(model.elements2)
        #nelements = len(model.elements) + nelements2
        nelements = nelements2
        nmasses = len(model.masses)
        nplotels = len(model.plotels)
        nrigid = len(model.rigid_elements)
        #nmpc = len(model.mpcs)  # really should only be allowed if we have it in a subcase
        if len(model.superelement_models):
            raise NoSuperelements('superelements are not supported in vectorized BDF')
        if nelements + nmasses + ncaero_cards + nplotels + nrigid == 0:
            msg = 'nelements + nmasses + ncaero_cards + nplotels + nrigid = 0\n'
            msg += 'card_count = %r' % str(model.card_count)
            raise NoGeometry(msg)
        self.gui.nnodes = ngui_nodes
        self.gui.nelements = nelements  # approximate...
        self.gui.log_info("nnodes=%i nelements=%i" % (self.nnodes, self.nelements))
        msg = model.get_bdf_stats(return_type='string')
        self.gui.log_debug(msg)
        msg = model.get_bdf_stats(return_type='list')
        # this call will break the GUI if there are a lot of lines and
        # by a lot I mean 37641.  It's fine for a single call.
        #for msgi in msg:
            #model.log.debug(msgi)
        # NOTE(review): nconm2 is hard-wired to 0 here (the counting code
        # is commented out), so the 'conm2' grid is never created in the
        # vectorized path — confirm this is intentional
        nconm2 = 0
        #if 'CONM2' in model.card_count:
            #nconm2 += model.card_count['CONM2']
        #if 'CMASS1' in model.card_count:
            #nconm2 += model.card_count['CMASS1']
        #if 'CMASS2' in model.card_count:
            #nconm2 += model.card_count['CMASS2']
        if nconm2 > 0:
            self.gui.create_alternate_vtk_grid(
                'conm2', color=ORANGE_FLOAT, line_width=5, opacity=1., point_size=4,
                representation='point', follower_function=None)
        # Allocate grids
        self.gui.grid.Allocate(self.nelements, 1000)
        #self._create_caero_actors(ncaeros, ncaeros_sub, ncaeros_cs, has_control_surface)
        #if nconm2 > 0:
            #self.gui.alt_grids['conm2'].Allocate(nconm2, 1000)
        if self.save_data:
            self.model = model
        #-----------------------------------------------------------------------
        # nodes/coords
        #print('get_xyz_in_coord')
        dim_max = 1.0
        xyz_cid0, nid_cp_cd = self.get_xyz_in_coord_vectorized(model, cid=0, fdtype='float32')
        if xyz_cid0 is not None:
            dim_max = self._points_to_vtkpoints_coords(model, xyz_cid0)
        #-----------------------------------------------------------------------
        #------------------------------------------------------------
        # TEMP
        j = 0
        results = self._map_elements_vectorized(self.nid_map, model, j, dim_max,
                                                nid_cp_cd, plot=True, xref_loads=True)
        has_control_surface = False
        geometry_names = []
        #------------------------------------------------------------
        # build the Geometry result cases for the sidebar
        cases = OrderedDict()
        form = ['Geometry', None, []]
        form0 = form[2]
        subcase_id = 0
        colormap = self.gui.settings.colormap
        if self.gui.nnodes > 0:
            icase = 0
            all_nids = nid_cp_cd[:, 0]
            self.gui.node_ids = all_nids
            nid_res = GuiResult(subcase_id, 'NodeID', 'NodeID', 'node', all_nids,
                                mask_value=0,
                                nlabels=None,
                                labelsize=None,
                                ncolors=None,
                                colormap=colormap,
                                data_format=None,
                                uname='GuiResult')
            cases[icase] = (nid_res, (0, 'Node ID'))
            form0.append(('Node ID', icase, []))
            icase += 1
            nid_res = GuiResult(subcase_id, 'iNode', 'iNode', 'node',
                                np.arange(len(all_nids), dtype='int32'),
                                mask_value=0,
                                nlabels=None,
                                labelsize=None,
                                ncolors=None,
                                colormap=colormap,
                                data_format=None,
                                uname='GuiResult')
            # NOTE(review): the case key says 'Node ID' (not 'iNode') —
            # looks like a copy/paste slip; confirm
            cases[icase] = (nid_res, (0, 'Node ID'))
            form0.append(('iNode', icase, []))
            icase += 1
            # this intentionally makes a deepcopy
            cds = np.array(nid_cp_cd[:, 2])
            if cds.max() > 0:
                cd_res = GuiResult(0, header='NodeCd', title='NodeCd',
                                   location='node', scalar=cds, colormap=colormap)
                cases[icase] = (cd_res, (0, 'NodeCd'))
                form0.append(('NodeCd', icase, []))
                icase += 1
        if self.gui.nelements > 0:
            eids_array = results['eid']
            eid_res = GuiResult(subcase_id, 'ElementID', 'ElementID', 'centroid', eids_array,
                                mask_value=0,
                                nlabels=None,
                                labelsize=None,
                                ncolors=None,
                                colormap=colormap,
                                data_format=None,
                                uname='GuiResult')
            cases[icase] = (eid_res, (0, 'ElementID'))
            form0.append(('ElementID', icase, []))
            icase += 1
            eids_array = results['eid']
            eid_res = GuiResult(subcase_id, 'iElement', 'iElement', 'centroid',
                                np.arange(len(eids_array), dtype='int32'),
                                mask_value=-1,
                                nlabels=None,
                                labelsize=None,
                                ncolors=None,
                                colormap=colormap,
                                data_format=None,
                                uname='GuiResult')
            cases[icase] = (eid_res, (0, 'iElement'))
            form0.append(('iElement', icase, []))
            icase += 1
            #is_element_dim = True
            dim_array = results['dim']
            if len(np.unique(dim_array)) > 1:
                dim_res = GuiResult(subcase_id, 'ElementDim', 'ElementDim', 'centroid', dim_array,
                                    mask_value=-1,
                                    nlabels=None,
                                    labelsize=None,
                                    ncolors=None,
                                    colormap=colormap,
                                    data_format=None,
                                    uname='GuiResult')
                cases[icase] = (dim_res, (0, 'ElementDim'))
                form0.append(('ElementDim', icase, []))
                icase += 1
            nnodes_array = results['nnodes']
            if nnodes_array.max() > -1:
                nnodes_res = GuiResult(subcase_id, 'NNodes/Elem', 'NNodes/Elem',
                                       'centroid', nnodes_array,
                                       mask_value=-1,
                                       nlabels=None,
                                       labelsize=None,
                                       ncolors=None,
                                       colormap=colormap,
                                       data_format=None,
                                       uname='GuiResult')
                cases[icase] = (nnodes_res, (0, 'NNodes/Elem'))
                form0.append(('NNodes/Elem', icase, []))
                icase += 1
            pids_array = results['pid']
            pid_res = GuiResult(0, header='PropertyID', title='PropertyID',
                                location='centroid', scalar=pids_array, mask_value=0)
            cases[icase] = (pid_res, (0, 'PropertyID'))
            form0.append(('PropertyID', icase, []))
            icase += 1
            #upids = np.unique(pids_array)
            unused_mid_eids_skip = []
            # figure out how many plies the mid/thickness tables need
            pcomp_nplies = 0
            nplies = 1
            is_pshell = False
            is_pcomp = False
            if 'PSHELL' in model.card_count:
                nplies = 4
                is_pshell = True
            for pid in model.get_card_ids_by_card_types(['PCOMP', 'PCOMPG'], combine=True):
                prop = model.properties[pid]
                pcomp_nplies = max(pcomp_nplies, prop.nplies)
                is_pcomp = True
            is_pshell_pcomp = (is_pshell, is_pcomp)
            nplies = max(nplies, pcomp_nplies + 1)
            mids = np.zeros((nelements, nplies), dtype='int32')
            thickness = np.full((nelements, nplies), np.nan, dtype='float32')
            #rho = np.full((nelements, nplies), np.nan, dtype='float32')
            # nplies is rebound here from a scalar to a per-element array
            nplies = np.zeros(nelements, dtype='int32')
            # materials
            upids = np.unique(pids_array)
            ipids = np.zeros(len(pids_array), dtype='int32')
            iupid = 0
            for upid in upids:  # upid_old
                if upid == 0:
                    # elements w/o properties
                    continue
                ipid = np.where(pids_array == upid)[0]
                ipids[ipid] = iupid
                if len(ipid):
                    try:
                        prop = model.properties[upid]
                    except KeyError:
                        raise KeyError('pid=%r properties=%s' % (upid, str(model.properties)))
                    if prop.type == 'PSHELL':
                        nplies[ipid] = 4
                        thickness[ipid, 0] = prop.Thickness()
                    elif prop.type in ['PCOMP', 'PCOMPG']:
                        nplies[ipid] = prop.nplies
                        for iply in range(prop.nplies):
                            mids[ipid, iply+1] = prop.Mid(iply)
                            thickness[ipid, iply+1] = prop.Thickness(iply)
                    else:
                        self.log.error(f'skipping setting mids (vectorized) for {prop.type}')
                iupid += 1
            if len(model.conrod):
                #mids[ieid, 0] = 42
                pass
            pid_res = GuiResult(0, header='iProperty', title='iProperty',
                                location='centroid', scalar=ipids, colormap=colormap)
            cases[icase] = (pid_res, (0, 'iProperty'))
            form0.append(('iProperty', icase, []))
            icase += 1
            #if nplies.max() > 0:
                #nplies_res = GuiResult(0, header='Number of Plies', title='nPlies',
                                       #location='centroid', scalar=nplies, mask_value=0)
                #cases[icase] = (nplies_res, (0, 'Number of Plies'))
                #form0.append(('Number of Plies', icase, []))
                #icase += 1
            # NOTE(review): both 'thickness' entries point at the nplies
            # array rather than the thickness array built above — looks
            # like a bug; confirm against _build_materials
            pshell = {
                'mids' : mids,
                'thickness' : nplies,
            }
            pcomp = {
                'mids' : mids,
                'thickness' : nplies,
                'nplies' : nplies,
            }
            icase = _build_materials(model, pshell, pcomp, is_pshell_pcomp,
                                     cases, form0, icase)
        #------------------------------------------------------------
        # add alternate actors
        self.gui._add_alt_actors(self.gui.alt_grids)
        # set default representation
        self._set_caero_representation(has_control_surface)
        for grid_name in geometry_names:
            if grid_name in self.gui.geometry_actors:
                self.gui.geometry_actors[grid_name].Modified()
        #self.gui.grid_mapper.SetResolveCoincidentTopologyToPolygonOffset()
        # NOTE(review): 'if 0' makes the results display below dead code
        # for the vectorized path — confirm this is intentional
        if 0:
            if plot:
                self.gui._finish_results_io2(model_name, [form], cases, reset_labels=reset_labels)
            else:
                self.gui._set_results([form], cases)
    def _map_elements_vectorized(self, unused_nid_map, model, unused_j, unused_dim_max,
                                 unused_nid_cp_cd, plot=True, xref_loads=True):
        """
        Much, much faster way to add elements that directly builds the
        VTK objects rather than using for loops.

        Parameters
        ----------
        unused_nid_map / unused_j / unused_dim_max / unused_nid_cp_cd : varies
            unused; kept for signature compatibility with map_elements
        model : BDF()
            the BDF model object
        plot : bool; default=True
            unused
        xref_loads : bool; default=True
            unused

        Returns
        -------
        results : dict or None
            per-element arrays ('eid', 'pid', 'dim', 'nnodes', ...) from
            ``add_vectorized_elements``; None when there are no elements

        Raises
        ------
        RuntimeError
            if an element produced an undefined (0) VTK cell type
        """
        self.gui.isubcase_name_map = {1: ['Nastran', '']}
        grid = self.gui.grid
        nelements = self.nelements
        if nelements == 0:
            return None
        idtype = get_numpy_idtype_for_vtk()
        log = self.log
        cell_types_array, cell_offsets_array, nids_list, eids_array, results = add_vectorized_elements(
            model, nelements, idtype, log)
        if cell_types_array.min() == 0:
            # a 0 cell type means some card was counted as an element but
            # never mapped; build a helpful error message
            # all the non-elemental cards should be listed
            # it's not hugely important, but it cleans up dev error messages
            skip_cards = [
                'CONM2',
                #'CELAS1', 'CELAS2', 'CELAS3', 'CELAS4', 'PLOTEL',
                'PARAM',
                #'CDAMP1', 'CDAMP2', 'CDAMP3', 'CDAMP4', 'CVISC',
                'TABLEM1', 'TABLEM2', 'TABLEM3', 'TABLEM4',
                'TABLED1', 'TABLED2', 'TABLED3', 'TABLED4', 'TABLEST',
                'MAT1', 'MAT2', 'MAT4', 'MAT5', 'MAT8', 'MAT9', 'MAT10',
                'MATT1', 'MATT2', 'MATT8',
                'MATS1', 'MATHP',
                'PLOAD', 'PLOAD1', 'PLOAD2', 'FORCE', 'PLOAD4', 'LOAD',
                'MAT1', 'PSHEAR', 'PSHELL', 'PTUBE', 'PDAMP',
                'PELAST', 'PBEND', 'PBEAM', 'PCOMP', 'PCOMPG', 'PBAR', 'PSOLID',
                'PLPLANE', 'PLSOLID',
                'PROD', 'PELAS', 'PVISC', 'PBUSH1D', 'PBUSH2D',
                #'EPOINT',
                #'CQUADR', 'CTRIAR', 'SPOINT',
                #'CQUAD8', 'CTRIA6',
                'ENDDATA',
                'CORD2R', 'CORD2C', 'CORD2S', 'CORD1R', 'CORD1C', 'CORD1S',
                'GRID', 'SPOINT', 'EPOINT', 'TF',
                'RFORCE', 'RFORCE1', 'RFORCE2', 'FORCE', 'FORCE1', 'FORCE2',
                'MOMENT', 'MOMENT1', 'MOMENT2', 'PLOAD', 'PLOAD1', 'PLOAD2', 'PLOAD4',
                'LOAD', 'TLOAD1', 'TLOAD2', 'DLOAD', 'LSEQ', 'DAREA',
                'RLOAD1', 'RLOAD2',
                'SUPORT', 'SUPORT1', 'MPC', 'MPCADD', 'RBE1', 'RBE2', 'RBE3', 'RBAR', 'RCROSS',
                'SPCADD', 'SPC', 'SPC1', 'SPCD', 'SPCAX', 'DMIG', 'DMI', 'DMIJ', 'DMIJI', 'DMIK',
                'AELIST', 'AELINK', 'AESURF', 'AESURFS', 'AERO', 'AEROS', 'TRIM',
                'FLUTTER', 'DIVERG',
                'CAERO1', 'CAERO2', 'CAERO3', 'CAERO4', 'CAERO5',
                'PAERO1', 'PAERO2', 'PAERO3', 'PAERO4', 'PAERO5',
                'SPLINE1', 'SPLINE2', 'SPLINE3', 'SPLINE4', 'SPLINE5', 'SPLINE6', 'SPLINE7',
                'CLOAD', 'TABLES1', 'NLPARM', 'GRDSET',
            ]
            potential_elements_found = [key for key in model.card_count if key not in skip_cards]
            etypes = [
                'CELAS1', 'CELAS2', 'CELAS3', 'CELAS4',
                'CDAMP1', 'CDAMP2', 'CDAMP3', 'CDAMP4', 'CDAMP5', 'CVISC',
                'CBUSH', 'CBUSH1D', 'CBUSH2D',
                'CONROD', 'CROD', 'CTUBE', 'PLOTEL',
                'CBAR', 'CBEAM', 'CBEND',
                'CSHEAR',
                'CTRIA3', 'CQUAD4', 'CTRIA6', 'CQUAD8', 'CTRIAR', 'CQUADR',
                'CTETRA', 'CPENTA', 'CHEXA', 'CPYRAM',
                'CHBDYG', 'CHBDYE', 'CHBDYP',
            ]
            for key in potential_elements_found:
                if key not in etypes:
                    log.warning('is %s an element?' % key)
            msg = (
                'Cell Type is not defined (cell_type=0).\n'
                '  cell_types_array = %s\n'
                '  potential_elements_found=[%s]\n'
                '  nelements=%s\n\n'
                '%s\n\n' % (
                    cell_types_array,
                    ', '.join(potential_elements_found),
                    len(cell_types_array),
                    '', #str(model.elements2),
                )
            )
            print(str(model.elements2))
            #msg += model.get_bdf_stats()
            raise RuntimeError(msg)
        deep = 1
        if len(nids_list) == 1:
            nids_array = nids_list[0].ravel()
        else:
            #raise NotImplementedError(len(nids_list))
            nids_array = np.hstack([nid_list.flatten() for nid_list in nids_list])
            #nids_array = np.array(nids_list, dtype=dtype)
        #-----------------------------------------------------------------
        # saving some data members
        self.gui.element_ids = eids_array
        #-----------------------------------------------------------------
        # build the grid
        #self.log.info('nids_array = %s' % nids_array)
        #self.log.info('cell_offsets_array = %s' % cell_offsets_array)
        #self.log.info('cell_types_array = %s' % cell_types_array)
        # Create the array of cells
        #print('nids_array =', nids_array)
        cells_id_type = numpy_to_vtkIdTypeArray(nids_array, deep=1)
        vtk_cells = vtk.vtkCellArray()
        vtk_cells.SetCells(nelements, cells_id_type)
        # Cell types
        vtk_cell_types = numpy_to_vtk(
            cell_types_array, deep=deep,
            array_type=vtk.vtkUnsignedCharArray().GetDataType())
        vtk_cell_offsets = numpy_to_vtk(cell_offsets_array, deep=deep,
                                        array_type=vtk.VTK_ID_TYPE)
        grid = self.gui.grid
        #grid = vtk.vtkUnstructuredGrid()
        grid.SetCells(vtk_cell_types, vtk_cell_offsets, vtk_cells)
        return results
    def _get_model_vectorized(self, bdf_filename):
        """
        Loads the BDF/OP2 geometry using the vectorized (dev) readers.

        Parameters
        ----------
        bdf_filename : str
            the .op2 / .bdf / .pch filename to load

        Returns
        -------
        model : vectorized BDF / OP2Geom
            the loaded model (not cross-referenced)
        """
        ext = os.path.splitext(bdf_filename)[1].lower()
        punch = False
        if ext == '.pch':
            punch = True
        self.model_type = 'nastran'
        log = self.log
        if ext == '.op2':
            # local imports keep the dev vectorized readers optional
            from pyNastran.dev.bdf_vectorized2.op2_geom_vectorized import (
                OP2Geom as OP2Geom_)
            model = OP2Geom_(make_geom=True, debug=False, log=log,
                             debug_file=None)
            model.clear_results()
            model.read_op2(op2_filename=bdf_filename)
        else:  # read the bdf/punch
            from pyNastran.dev.bdf_vectorized2.bdf_vectorized import BDF as BDF_
            model = BDF_(log=log, debug=True)
            # static_elements.bdf
            #skip_cards = [
                #'CELAS1', 'CELAS2', 'CELAS3', 'CELAS4', 'PLOTEL', 'PARAM',
                #'CDAMP1', 'CDAMP2', 'CDAMP3', 'CDAMP4', 'CVISC',
                #'TABLEM1', 'TABLEM2', 'TABLEM3', 'TABLEM4',
                #'TABLED1', 'TABLED2', 'TABLED3', 'TABLED4',
                #'PLOAD', 'PLOAD1', 'PLOAD2', 'FORCE', 'PLOAD4', 'LOAD',
                #'SPCADD', 'MAT1', 'PSHEAR', 'PSHELL', 'PTUBE', 'PDAMP',
                #'SPC1', 'CONM2', 'PELAST', 'PBEND', 'PBEAM', 'PCOMP', 'PCOMPG', 'PBAR', 'PSOLID',
                #'PBUSH1D',
                #'EPOINT',
                #'CQUADR', 'CTRIAR', 'SPOINT', 'PROD', 'PELAS', 'PVISC',
                #'CQUAD8', 'CTRIA6',
            #]
            #model.disable_cards(skip_cards)
            model.read_bdf(bdf_filename,
                           punch=punch, xref=False,
                           validate=True)
            #print(list(key for key in model.card_count.keys() if key not in skip_cards))
        #xref_loads = False
        #xref_aero = len(model.caeros) > 0
        #model.cross_reference(
            #xref=True,
            #xref_nodes=True,
            #xref_elements=False,
            #xref_nodes_with_elements=False,
            #xref_properties=True,
            #xref_masses=True,
            #xref_materials=False,
            #xref_loads=xref_loads,
            #xref_constraints=False,
            #xref_optimization=False,
            #xref_aero=False,
            #xref_sets=False,
        #)
        return model
def _points_to_vtkpoints_coords(self, model, xyz_cid0):
"""
helper method for:
- load_nastran_geometry_unvectorized
- load_nastran_geometry_vectorized
"""
points = numpy_to_vtk_points(xyz_cid0)
self.gui.grid.SetPoints(points)
self.xyz_cid0 = xyz_cid0
maxi = xyz_cid0.max(axis=0)
mini = xyz_cid0.min(axis=0)
assert len(maxi) == 3, len(maxi)
xmax, ymax, zmax = maxi
xmin, ymin, zmin = mini
dim_max = max(xmax-xmin, ymax-ymin, zmax-zmin)
#print('_create_nastran_coords')
self._create_nastran_coords(model, dim_max)
#print('done _create_nastran_coords')
self.gui.log_info("xmin=%s xmax=%s dx=%s" % (xmin, xmax, xmax-xmin))
self.gui.log_info("ymin=%s ymax=%s dy=%s" % (ymin, ymax, ymax-ymin))
self.gui.log_info("zmin=%s zmax=%s dz=%s" % (zmin, zmax, zmax-zmin))
return dim_max
    def load_nastran_geometry_unvectorized(self, bdf_filename, plot=True):
        """
        The entry point for Nastran geometry loading.

        Parameters
        ----------
        bdf_filename : str
            the Nastran filename to load
        plot : bool; default=True
            should the model be generated or should we wait until
            after the results are loaded

        Raises
        ------
        NoGeometry
            if the model (including its superelements) has nothing to draw
        """
        model_name = 'main'
        reset_labels = True
        if plot:
            self.gui.scalar_bar_actor.VisibilityOff()
            self.gui.scalar_bar_actor.Modified()
        xref_loads = True  # should be True
        model, xref_nodes = self._get_model_unvectorized(bdf_filename, xref_loads=xref_loads)
        # count nodes/caeros including any superelements
        nnodes = len(model.nodes)
        nspoints = len(model.spoints)
        nepoints = len(model.epoints)
        ngridb = len(model.gridb)
        ncaero_cards = len(model.caeros)
        for superelement in model.superelement_models.values():
            nnodes += len(superelement.nodes)
            nspoints += len(superelement.spoints)
            nepoints += len(superelement.epoints)
            ngridb += len(superelement.gridb)
            ncaero_cards += len(superelement.caeros)
        ngui_nodes = nnodes + nspoints + nepoints + ngridb
        if ngui_nodes + ncaero_cards == 0:
            msg = 'nnodes + nspoints + nepoints = 0\n'
            msg += 'card_count = %r' % str(model.card_count)
            raise NoGeometry(msg)
        # count elements including any superelements
        nelements = len(model.elements)
        nmasses = len(model.masses)
        nplotels = len(model.plotels)
        nrigid = len(model.rigid_elements)
        for superelement in model.superelement_models.values():
            nelements += len(superelement.elements)
            nmasses += len(superelement.masses)
            nplotels += len(superelement.plotels)
            nrigid += len(superelement.rigid_elements)
        #nmpc = len(model.mpcs)  # really should only be allowed if we have it in a subcase
        if nelements + nmasses + ncaero_cards + nplotels + nrigid == 0:
            msg = 'nelements + nmasses + ncaero_cards + nplotels + nrigid = 0\n'
            msg += 'card_count = %r' % str(model.card_count)
            raise NoGeometry(msg)
        self.nnodes = ngui_nodes
        self.nelements = nelements  # approximate...
        out = self.make_caeros(model)
        (has_caero, caero_points, ncaeros, ncaeros_sub, ncaeros_cs,
         ncaeros_points, ncaero_sub_points,
         has_control_surface, box_id_to_caero_element_map, cs_box_ids) = out
        self.has_caero = has_caero
        self.gui.log_info("nnodes=%i nelements=%i" % (self.nnodes, self.nelements))
        msg = model.get_bdf_stats(return_type='string')
        self.gui.log_debug(msg)
        msg = model.get_bdf_stats(return_type='list')
        # this call will break the GUI if there are a lot of lines and
        # by a lot I mean 37641.  It's fine for a single call.
        #for msgi in msg:
            #model.log.debug(msgi)
        nconm2 = self._create_masses(model)
        # Allocate grids
        self.gui.grid.Allocate(self.nelements, 1000)
        self._create_caero_actors(ncaeros, ncaeros_sub, ncaeros_cs, has_control_surface)
        if nconm2 > 0:
            self.gui.alt_grids['conm2'].Allocate(nconm2, 1000)
        if self.save_data:
            self.model = model
        #-----------------------------------------------------------------------
        # nodes/coords
        #print('get_xyz_in_coord')
        dim_max = 1.0
        xyz_cid0 = None
        nid_cp_cd = None
        if self.gui.nnodes:
            xyz_cid0, nid_cp_cd = self.get_xyz_in_coord(model, cid=0, fdtype='float32')
            dim_max = self._points_to_vtkpoints_coords(model, xyz_cid0)
        #-----------------------------------------------------------------------
        j = 0
        nid_map = self.gui.nid_map
        # build the element actors + the Geometry result cases
        nid_to_pid_map, icase, cases, form = self.map_elements(
            xyz_cid0, nid_cp_cd, nid_map, model, j, dim_max,
            plot=plot, xref_loads=xref_loads)
        self._create_aero(model, box_id_to_caero_element_map, cs_box_ids,
                          caero_points, ncaeros_points, ncaero_sub_points,
                          has_control_surface)
        if nconm2 > 0 and xref_nodes:
            self._set_conm_grid(nconm2, model)
        geometry_names = []
        if self.make_spc_mpc_supports and xref_nodes:
            geometry_names = self.set_spc_mpc_suport_grid(model, nid_to_pid_map)
        if xref_nodes and self.gui.settings.nastran_is_bar_axes:
            icase = self._fill_bar_yz(dim_max, model, icase, cases, form)
        assert icase is not None
        #------------------------------------------------------------
        #print('dependent_nodes =', self.dependents_nodes)
        icase = self._set_subcases_unvectorized(model, form, cases, icase, xref_nodes, xref_loads)
        # a translucent copy of the main grid (used for highlighting)
        name = 'main_copy'
        self.gui.duplicate_alternate_vtk_grid(
            name, 'main', color=(0., 0., 0.), line_width=5,
            opacity=0.1, is_visible=False)
        #------------------------------------------------------------
        # add alternate actors
        self.gui._add_alt_actors(self.gui.alt_grids)
        # set default representation
        self._set_caero_representation(has_control_surface)
        for grid_name in geometry_names:
            if grid_name in self.gui.geometry_actors:
                self.gui.geometry_actors[grid_name].Modified()
        #self.grid_mapper.SetResolveCoincidentTopologyToPolygonOffset()
        stop_on_failure = IS_TESTING
        build_map_centroidal_result(model, nid_map, stop_on_failure=stop_on_failure)
        # the Nastran model sidebar is a dev-only feature
        if not IS_TESTING and 'dev' in __version__:
            self.sidebar_nastran = ModelSidebar(self.gui, nastran_io=self)
            self.sidebar_nastran.set_model(model)
            self.res_dock_nastran = QDockWidget("Nastran Model", self)
            self.res_dock_nastran.setObjectName("nastran_model")
            self.res_dock_nastran.setWidget(self.sidebar_nastran)
            self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.res_dock_nastran)
        #self.res_dock.setWidget(self.res_widget)
        if plot:
            self.gui._finish_results_io2(model_name, [form], cases, reset_labels=reset_labels)
        else:
            self.gui._set_results([form], cases)
    def _create_masses(self, model: BDF) -> int:
        """
        Counts the mass elements (CONM2/CMASS1/CMASS2) and, if any exist,
        registers the 'conm2' point actor with a follower function that
        repositions the mass markers when the underlying nodes move.

        Parameters
        ----------
        model : BDF
            the geometry model

        Returns
        -------
        nconm2 : int
            number of mass elements found (0 means no actor was created)
        """
        nconm2 = 0
        if 'CONM2' in model.card_count:
            nconm2 += model.card_count['CONM2']
        if 'CMASS1' in model.card_count:
            nconm2 += model.card_count['CMASS1']
        if 'CMASS2' in model.card_count:
            nconm2 += model.card_count['CMASS2']
        # CMASS3, CMASS4 are applied to SPOINTs
        if nconm2 == 0:
            return nconm2

        gui = self.gui
        def update_conm2s_function(unused_nid_map, unused_ugrid, points, nodes):
            """follower: recompute each mass marker position from the deformed nodes"""
            if not gui.settings.nastran_is_update_conm2:
                return
            j2 = 0
            mass_grid = gui.alt_grids['conm2']
            for unused_eid, element in sorted(model.masses.items()):
                if isinstance(element, CONM2):
                    nid = element.nid
                    inid = np.searchsorted(self.node_ids, nid)
                    xyz_nid = nodes[inid, :]
                    centroid = element.offset(xyz_nid)
                    points.SetPoint(j2, *centroid)

                elif element.type in ('CMASS1', 'CMASS2'):
                    n1, n2 = element.nodes
                    factor = 0.
                    if element.nodes[0] is not None:
                        inid = np.searchsorted(self.node_ids, n1)
                        p1 = nodes[inid, :]
                        factor += 1.
                    if element.nodes[1] is not None:
                        inid = np.searchsorted(self.node_ids, n2)
                        p2 = nodes[inid, :]
                        factor += 1.
                    # NOTE(review): if a node slot is None, p1/p2 may be unbound
                    # and factor may be 0. -> NameError/ZeroDivisionError; the
                    # visible code does not guard this — confirm CMASS nodes
                    # are always present here
                    centroid = (p1 + p2) / factor
                    points.SetPoint(j2, *centroid)

                    # NOTE(review): inserting cells inside a follower function
                    # (called on every update) appends new cells each call,
                    # unlike the CONM2 branch which only moves points — verify
                    elem = vtk.vtkVertex()
                    elem.GetPointIds().SetId(0, j2)
                    mass_grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
                else:
                    # unsupported mass element types are skipped entirely
                    # (and do not consume a point index)
                    continue
                    #self.gui.log_info("skipping %s" % element.type)
                j2 += 1
            return

        gui.create_alternate_vtk_grid(
            'conm2', color=ORANGE_FLOAT, line_width=5, opacity=1., point_size=4,
            follower_function=update_conm2s_function,
            representation='point')
        return nconm2
def update_caeros(self, obj):
"""the update call for the ModifyMenu"""
model = self.model # type: BDF
xref_errors = {}
model._uncross_reference_aero()
model._cross_reference_aero(check_caero_element_ids=False)
obj.uncross_reference()
obj.safe_cross_reference(model, xref_errors)
out = self.make_caeros(model)
(has_caero, caero_points, ncaeros, ncaeros_sub, ncaeros_cs,
ncaeros_points, ncaero_sub_points,
has_control_surface, box_id_to_caero_element_map, cs_box_ids) = out
self.has_caero = has_caero
self._create_aero(model, box_id_to_caero_element_map, cs_box_ids,
caero_points, ncaeros_points, ncaero_sub_points,
has_control_surface)
self.Render()
def _create_aero(self, model, box_id_to_caero_element_map, cs_box_ids,
caero_points, ncaeros_points, ncaero_sub_points, has_control_surface):
# fill grids
zfighting_offset0 = 0.001
zfighting_offset = zfighting_offset0
self._create_splines(model, box_id_to_caero_element_map, caero_points)
if 'caero' in self.gui.alt_grids:
self.set_caero_grid(ncaeros_points, model)
self.set_caero_subpanel_grid(ncaero_sub_points, model)
if has_control_surface:
cs_name = 'caero_control_surfaces'
self.set_caero_control_surface_grid(
cs_name, cs_box_ids[cs_name],
box_id_to_caero_element_map, caero_points,
zfighting_offset=zfighting_offset)
zfighting_offset += zfighting_offset0
# sort the control surfaces
labels_to_aesurfs = {aesurf.label: aesurf for aesurf in model.aesurf.values()}
if len(labels_to_aesurfs) != len(model.aesurf):
msg = (
'Expected same number of label->aesurf as aid->aesurf\n'
'labels_to_aesurfs = %r\n'
'model.aesurf = %r\n' % (labels_to_aesurfs, model.aesurf))
raise RuntimeError(msg)
for unused_label, aesurf in sorted(labels_to_aesurfs.items()):
#reset_labels = False
cs_name = '%s_control_surface' % aesurf.label
self.set_caero_control_surface_grid(
cs_name, cs_box_ids[cs_name],
box_id_to_caero_element_map, caero_points, note=aesurf.label,
zfighting_offset=zfighting_offset)
zfighting_offset += zfighting_offset0
    def _set_subcases_unvectorized(self, model, form, cases, icase, xref_nodes, xref_loads):
        """helper for ``load_nastran_geometry_unvectorized``

        Builds one sidebar sub-form per subcase (applied loads and
        pressures) and threads ``icase`` through the result-builders.

        Returns
        -------
        icase : int
            the next free case index
        """
        settings = self.gui.settings  # type: Settings
        colormap = settings.colormap
        form0 = form[2]
        assert icase is not None
        nsubcases = len(model.subcases)
        for subcase_idi, subcase in sorted(model.subcases.items()):
            # load results need cross-referenced nodes to resolve positions
            if not xref_nodes:
                continue

            subcase_id = subcase_idi
            # a lone subcase 0 is treated as subcase 1; otherwise the
            # "global" subcase 0 is skipped
            if subcase_id == 0 and nsubcases == 1:
                subcase_id = 1
            elif subcase_id == 0:
                continue
            self.gui.log_debug('NastranIOv subcase_id = %s' % subcase_id)

            subtitle = ''
            if 'SUBTITLE' in subcase:
                subtitle, options = subcase.get_parameter('SUBTITLE')
                del options

            load_str = 'Load Case=%i' % subcase_id if subtitle == '' else 'Load Case=%i; %s' % (
                subcase_id, subtitle)
            formi = (load_str, None, [])
            formii = formi[2]

            assert icase is not None
            if self.normals is not None and self.plot_applied_loads:
                icase = self._plot_applied_loads(
                    model, cases, formii, icase, subcase_idi, xref_loads=xref_loads,
                    colormap=colormap,
                )
                #plot_pressures = False
                plot_pressures = True
            else:
                plot_pressures = True

            if plot_pressures:  # and self._plot_pressures:
                # pressures are best-effort: a missing key is logged,
                # not fatal
                try:
                    icase = self._plot_pressures(
                        model, cases, formii, icase, subcase_idi)
                except KeyError:
                    s = StringIO()
                    traceback.print_exc(file=s)
                    sout = s.getvalue()
                    self.gui.log_error(sout)
                    print(sout)

            # only add the subcase to the sidebar if it produced results
            if len(formii):
                form0.append(formi)
        return icase
def _create_caero_actors(self, ncaeros, ncaeros_sub, ncaeros_cs, has_control_surface):
"""
This just creates the following actors. It does not fill them.
These include:
- caero
- caero_subpanels
- caero_control_surfaces
"""
if self.has_caero:
gui = self.gui
gui.create_alternate_vtk_grid(
'caero', color=YELLOW_FLOAT, line_width=3, opacity=1.0,
representation='toggle', is_visible=True, is_pickable=False)
gui.create_alternate_vtk_grid(
'caero_subpanels', color=YELLOW_FLOAT, line_width=3, opacity=1.0,
representation='toggle', is_visible=False, is_pickable=False)
gui.alt_grids['caero'].Allocate(ncaeros, 1000)
gui.alt_grids['caero_subpanels'].Allocate(ncaeros_sub, 1000)
if has_control_surface:
gui.alt_grids['caero_control_surfaces'].Allocate(ncaeros_cs, 1000)
def _set_caero_representation(self, has_control_surface: bool) -> None:
"""
Parameters
----------
has_control_surface : bool
is there a control surface
"""
geometry_actors = self.gui.geometry_actors
if 'caero_control_surfaces' in geometry_actors:
self.gui.geometry_properties['caero_control_surfaces'].opacity = 0.5
if 'caero' not in geometry_actors:
return
geometry_actors['caero'].Modified()
geometry_actors['caero_subpanels'].Modified()
if has_control_surface:
geometry_actors['caero_control_surfaces'].Modified()
if hasattr(geometry_actors['caero'], 'Update'):
geometry_actors['caero'].Update()
if hasattr(geometry_actors['caero_subpanels'], 'Update'):
geometry_actors['caero_subpanels'].Update()
if has_control_surface and hasattr(geometry_actors['caero_subpanels'], 'Update'):
geometry_actors['caero_control_surfaces'].Update()
def _create_splines(self, model: BDF, box_id_to_caero_element_map: Dict[int, int], caero_points):
"""
Sets the following actors:
- spline_%s_structure_points % spline_id
- spline_%s_boxes % spline_id
Parameters
----------
model : BDF()
the bdf model
box_id_to_caero_element_map : dict[key] : value
???
caero_points : ???
???
"""
stored_msg = []
if model.splines:
# 0 - caero / caero_subpanel
# 1 - control surface
# 3/5/7/... - spline points
# 2/4/6/... - spline panels
iaero = 2
for spline_id, spline in sorted(model.splines.items()):
setg_ref = spline.setg_ref
if setg_ref is None:
msg = 'error cross referencing SPLINE:\n%s' % spline.rstrip()
#n, filename = log_properties(1)
#print(filename, n)
#stored_msg.append(msg)
self.log.error(msg)
#raise RuntimeError(msg)
continue
else:
structure_points = setg_ref.get_ids()
try:
aero_box_ids = spline.aero_element_ids
except:
print(spline.object_attributes())
print(spline.object_methods())
raise
if spline.type != 'SPLINE3_ZAERO':
assert len(aero_box_ids) > 0, spline
# the control surfaces all lie perfectly on top of each other
# such that we have z fighting, so based on the aero index,
# we calculate a z offset.
zfighting_offset = 0.0001 * (iaero + 1)
grid_name = 'spline_%s_structure_points' % spline_id
self.gui.create_alternate_vtk_grid(
grid_name, color=BLUE_FLOAT, opacity=1.0, point_size=5,
representation='point', is_visible=False)
msg = ', which is required by %r' % grid_name
stored_msgi = self._add_nastran_nodes_to_grid(
grid_name, structure_points, model, msg, store_msg=True)
zfighting_offset = 0.0001 * (iaero + 2)
grid_name = 'spline_%s_boxes' % spline_id
self.gui.create_alternate_vtk_grid(
grid_name, color=BLUE_FLOAT, opacity=0.3,
line_width=4,
representation='toggle', is_visible=False)
stored_msgi2 = self.set_caero_control_surface_grid(
grid_name, aero_box_ids,
box_id_to_caero_element_map, caero_points,
zfighting_offset=zfighting_offset, store_msg=True)
iaero += 2
if stored_msgi:
stored_msg.append(stored_msgi)
if stored_msgi2:
stored_msg.append(stored_msgi2)
if stored_msg:
model.log.warning('\n' + '\n'.join(stored_msg))
    def make_caeros(self, model: BDF) -> Tuple[np.ndarray, int, int, int, int, bool,
                                               Dict[int, int], List[int]]:
        """
        Creates the CAERO panel inputs including:
         - caero
         - caero_subpanels
         - caero_control_surfaces
         - N control surfaces

        Parameters
        ----------
        model : BDF()
            the bdf model

        Returns
        -------
        caero_points : (N_aero_points, 3) float ndarray
            the xyz points for the aero panels
            N_aero_points can be 0
        ncaeros : int
            the number of aero sub-panels?
        ncaeros_sub : int
            ???
        ncaeros_cs : int
            ???
        ncaeros_points : int
            number of points for the caero coarse grid
        ncaero_sub_points : int
            number of points for the caero fine/subpanel grid
        has_control_surface : bool
            is there a control surface
        box_id_to_caero_element_map : dict[box_id] = box_index
            used to map the CAEROx box id to index in the ???
            (aero panel elements) array, which will be used with
            cs_box_ids
        cs_box_ids : dict[control_surface_name] : List[panel ids]
            list of panels used by each aero panel
        """
        has_caero = False
        ncaeros = 0
        ncaeros_sub = 0
        ncaeros_cs = 0
        ncaeros_points = 0
        ncaero_sub_points = 0
        has_control_surface = False
        box_id_to_caero_element_map = {}
        cs_box_ids = defaultdict(list)

        # when caeros is empty, SPLINEx/AESURF cannot be defined
        if len(model.caeros) == 0:
            # early-out with empty/zeroed results
            caero_points = np.empty((0, 3))
            out = (
                has_caero, caero_points, ncaeros, ncaeros_sub, ncaeros_cs,
                ncaeros_points, ncaero_sub_points,
                has_control_surface, box_id_to_caero_element_map, cs_box_ids,
            )
            return out

        ncaeros, ncaeros_sub, ncaeros_points, ncaero_sub_points = get_caero_count(model)
        caero_points, has_caero = get_caero_points(model, box_id_to_caero_element_map)

        # check for any control surfcaes
        if model.aesurf:
            has_control_surface = True
            #ncaero_cs_points = 0
            self.gui.create_alternate_vtk_grid(
                'caero_control_surfaces', color=PINK_FLOAT, line_width=5, opacity=1.0,
                representation='surface', is_visible=False)

            # sort the control surfaces; duplicate labels would silently
            # collapse in the dict, so that case is rejected
            labels_to_aesurfs = {aesurf.label: aesurf for aesurf in model.aesurf.values()}
            if len(labels_to_aesurfs) != len(model.aesurf):
                msg = (
                    'Expected same number of label->aesurf as aid->aesurf\n'
                    'labels_to_aesurfs = %r\n'
                    'model.aesurf = %r\n' % (labels_to_aesurfs, model.aesurf))
                raise RuntimeError(msg)

            for unused_label, aesurf in sorted(model.aesurf.items()):
                if aesurf.type == 'AESURFZ':
                    # ZONA-style surface: box ids are stored on the card
                    aero_element_ids = aesurf.aero_element_ids
                    ncaeros_cs += len(aero_element_ids)

                    cs_name = '%s_control_surface' % aesurf.label
                    self.gui.create_alternate_vtk_grid(
                        cs_name, color=PINK_FLOAT, line_width=5, opacity=0.5,
                        representation='surface')

                    cs_box_ids['caero_control_surfaces'].extend(aero_element_ids)
                    cs_box_ids[cs_name].extend(aero_element_ids)
                else:
                    # standard AESURF: box ids come from the referenced AELIST
                    aelist_ref = aesurf.alid1_ref
                    if aelist_ref is None:
                        self.log.error('AESURF does not reference an AELIST\n%s' % (
                            aesurf.rstrip()))
                        continue
                    ncaeros_cs += len(aelist_ref.elements)

                    cs_name = '%s_control_surface' % aesurf.label
                    self.gui.create_alternate_vtk_grid(
                        cs_name, color=PINK_FLOAT, line_width=5, opacity=0.5,
                        representation='surface')

                    cs_box_ids['caero_control_surfaces'].extend(aelist_ref.elements)
                    cs_box_ids[cs_name].extend(aelist_ref.elements)

                    # the optional second AELIST contributes more boxes
                    if aesurf.alid2 is not None:
                        aelist_ref = aesurf.alid2_ref
                        ncaeros_cs += len(aelist_ref.elements)
                        cs_box_ids[cs_name].extend(aelist_ref.elements)
                        cs_box_ids['caero_control_surfaces'].extend(aelist_ref.elements)
        out = (
            has_caero, caero_points, ncaeros, ncaeros_sub, ncaeros_cs,
            ncaeros_points, ncaero_sub_points,
            has_control_surface, box_id_to_caero_element_map, cs_box_ids,
        )
        return out
    def set_caero_grid(self, ncaeros_points, model):
        """
        Sets the CAERO panel geometry.

        Fills the 'caero' actor with one quad per coarse panel (CAERO1/3/4/5/7)
        and a 3D quad mesh for slender bodies (CAERO2/BODY7).

        Parameters
        ----------
        ncaeros_points : int
            number of points used by the 'caero' actor
        model : BDF()
            the bdf model
        """
        gui = self.gui
        points = vtk.vtkPoints()
        points.SetNumberOfPoints(ncaeros_points)

        max_cpoints = []
        min_cpoints = []

        # lift the panels slightly so they don't z-fight the structure
        zfighting_offset = 0.0001
        caero_grid = gui.alt_grids['caero']
        j = 0  # running point index into ``points``
        for unused_eid, element in sorted(model.caeros.items()):
            if isinstance(element, (CAERO1, CAERO3, CAERO4, CAERO5, CAERO7)):
                # wing panel
                cpoints = element.get_points()
                cpoints[0][2] += zfighting_offset
                cpoints[1][2] += zfighting_offset
                max_cpoints.append(np.array(cpoints).max(axis=0))
                min_cpoints.append(np.array(cpoints).min(axis=0))

                elem = vtkQuad()
                elem.GetPointIds().SetId(0, j)
                elem.GetPointIds().SetId(1, j + 1)
                elem.GetPointIds().SetId(2, j + 2)
                elem.GetPointIds().SetId(3, j + 3)
                points.InsertPoint(j, *cpoints[0])
                points.InsertPoint(j + 1, *cpoints[1])
                points.InsertPoint(j + 2, *cpoints[2])
                points.InsertPoint(j + 3, *cpoints[3])
                caero_grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
                j += 4
            elif isinstance(element, (CAERO2, BODY7)):
                # slender body
                #if 0:  # pragma: no cover
                    # 1D version
                    #cpoints = element.get_points()
                    #cpoints[:, 2] += zfighting_offset
                    #max_cpoints.append(np.array(cpoints).max(axis=0))
                    #min_cpoints.append(np.array(cpoints).min(axis=0))

                    #elem = vtk.vtkLine()
                    #elem.GetPointIds().SetId(0, j)
                    #elem.GetPointIds().SetId(1, j + 1)

                    #points.InsertPoint(j, *cpoints[0])
                    #points.InsertPoint(j + 1, *cpoints[1])
                    #j += 2
                    #caero_grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
                #else:
                # 3D version
                xyz, elems = element.get_points_elements_3d()
                assert xyz is not None, element
                xyz[:, 2] += zfighting_offset
                # one quad per body face; ``elemi`` holds local node indices
                for elemi in elems:
                    elem = vtkQuad()
                    elem.GetPointIds().SetId(0, j)
                    elem.GetPointIds().SetId(1, j + 1)
                    elem.GetPointIds().SetId(2, j + 2)
                    elem.GetPointIds().SetId(3, j + 3)
                    n1, n2, n3, n4 = elemi
                    points.InsertPoint(j, *xyz[n1])
                    points.InsertPoint(j + 1, *xyz[n2])
                    points.InsertPoint(j + 2, *xyz[n3])
                    points.InsertPoint(j + 3, *xyz[n4])
                    #cpoints = element.get_points()
                    #cpoints[0][2] += zfighting_offset
                    #cpoints[1][2] += zfighting_offset
                    #max_cpoints.append(np.array(cpoints).max(axis=0))
                    #min_cpoints.append(np.array(cpoints).min(axis=0))
                    caero_grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
                    j += 4
            else:
                gui.log_info("skipping %s" % element.type)

        if ncaeros_points and len(max_cpoints):
            gui.log_info('CAERO.max = %s' % np.vstack(max_cpoints).max(axis=0))
            gui.log_info('CAERO.min = %s' % np.vstack(min_cpoints).min(axis=0))
        caero_grid.SetPoints(points)
        #gui.alt_grids['caero']
        #edge_mapper.SetResolveCoincidentTopologyToPolygonOffset()
def set_caero_subpanel_grid(self, ncaero_sub_points, model):
"""
Sets the CAERO sub-panel geometry.
Parameters
----------
ncaero_sub_points : int
number of points used by the 'caero_subpanels' actor
model : BDF()
the bdf model
"""
points = vtk.vtkPoints()
points.SetNumberOfPoints(ncaero_sub_points)
vtk_type = vtkQuad().GetCellType()
grid = self.gui.alt_grids['caero_subpanels']
j = 0
for unused_eid, element in sorted(model.caeros.items()):
if isinstance(element, (CAERO1, CAERO3, CAERO4, CAERO5, CAERO7)):
pointsi, elementsi = element.panel_points_elements()
ipoint = 0
for ipoint, pointii in enumerate(pointsi):
points.InsertPoint(j + ipoint, *pointii)
elem = vtkQuad()
for elementi in elementsi:
elem = vtkQuad()
elem.GetPointIds().SetId(0, j + elementi[0])
elem.GetPointIds().SetId(1, j + elementi[1])
elem.GetPointIds().SetId(2, j + elementi[2])
elem.GetPointIds().SetId(3, j + elementi[3])
grid.InsertNextCell(vtk_type, elem.GetPointIds())
j += ipoint + 1
else:
self.gui.log_info("skipping %s" % element.type)
grid.SetPoints(points)
def set_caero_control_surface_grid(self, name, cs_box_ids,
box_id_to_caero_element_map,
caero_points, note=None,
zfighting_offset=0.001, store_msg=False):
"""
Creates a single CAERO control surface?
Parameters
----------
name : str
???
aero_box_ids : List[int]
the ids of the box as seen on the AESURF? SET card?
box_id_to_caero_element_map : Dict[key]=value
key : ???
???
value : ???
???
caero_points : (ncaero_points, 3)
the xyz coordinates used by the CAEROx actor
label : str / None
None : no label will be used
str : the name of the control surface card will be placed
at the centroid of the panel
zfighting_offset : float
z-fighting is when two elements "fight" for who is in front
leading. The standard way to fix this is to bump the
element.
Returns
-------
stored_msg : str
???
"""
gui = self.gui
log = self.gui.log
boxes_to_show, stored_msg = check_for_missing_control_surface_boxes(
name, cs_box_ids, box_id_to_caero_element_map, log,
store_msg=store_msg)
#if not boxes_to_show:
#print('*%s' % name)
#print('*%s' % boxes_to_show)
#return
areas = []
centroids = []
vtk_type = vtkQuad().GetCellType()
all_points = []
#if name not in gui.alt_grids:
#print('**%s' % name)
#return
j = 0
grid = gui.alt_grids[name]
grid.Reset()
for box_id in boxes_to_show:
elementi = box_id_to_caero_element_map[box_id]
pointsi = caero_points[elementi]
centroid = (pointsi[0] + pointsi[1] + pointsi[2] + pointsi[3]) / 4.
area = np.linalg.norm(np.cross(pointsi[2] - pointsi[0], pointsi[3] - pointsi[1])) / 2.
if area == 0.0:
print('box_id=%i has 0 area' % box_id)
continue
elem = vtkQuad()
point_ids = elem.GetPointIds()
point_ids.SetId(0, j)
point_ids.SetId(1, j + 1)
point_ids.SetId(2, j + 2)
point_ids.SetId(3, j + 3)
grid.InsertNextCell(vtk_type, point_ids)
all_points.append(pointsi)
centroids.append(centroid)
areas.append(area)
j += 4
if len(all_points) == 0:
log.error('deleting %r' % name)
# name = spline_1000_boxes
sname = name.split('_')
sname[-1] = 'structure_points'
# points_name = spline_1000_structure_points
points_name = '_'.join(sname)
log.error('deleting %r' % points_name)
gui.remove_alt_grid(name, remove_geometry_property=True)
gui.remove_alt_grid(points_name, remove_geometry_property=True)
return stored_msg
# combine all the points
all_points_array = np.vstack(all_points)
# shift z to remove z-fighting with caero in surface representation
all_points_array[:, [1, 2]] += zfighting_offset
# get the vtk object
points = numpy_to_vtk_points(all_points_array, deep=0)
#if missing_boxes:
#msg = 'Missing CAERO AELIST boxes: ' + str(missing_boxes)
#gui.log_error(msg)
if note:
# points_list (15, 4, 3) = (elements, nodes, 3)
x, y, z = np.average(centroids, weights=areas, axis=0)
text = str(note)
#slot = gui.label_actors[-1]
slot = gui.reset_label_actors(name)
annotation = gui.create_annotation(text, x, y, z)
slot.append(annotation)
grid.SetPoints(points)
return stored_msg
    def _set_conm_grid(self, nconm2, model):
        """
        creates the mass secondary actor called:
         - conm2

        which includes:
         - CONM2
         - CMASS1
         - CMASS2

        because it's really a "mass" actor

        Parameters
        ----------
        nconm2 : int
            the number of mass elements (sizes the point array)
        model : BDF()
            the bdf model
        """
        j = 0  # running point/cell index
        points = vtk.vtkPoints()
        points.SetNumberOfPoints(nconm2)

        #sphere_size = self._get_sphere_size(dim_max)
        alt_grid = self.gui.alt_grids['conm2']
        for unused_eid, element in sorted(model.masses.items()):
            if isinstance(element, CONM2):
                # position = node position + the CONM2 offset vector
                xyz_nid = element.nid_ref.get_position()
                centroid = element.offset(xyz_nid)

                #centroid_old = element.Centroid()
                #assert np.all(np.allclose(centroid_old, centroid)), 'centroid_old=%s new=%s' % (centroid_old, centroid)
                #d = norm(xyz - c)
                points.InsertPoint(j, *centroid)

                #if 1:
                elem = vtk.vtkVertex()
                elem.GetPointIds().SetId(0, j)
                #else:
                    #elem = vtk.vtkSphere()
                    #elem.SetRadius(sphere_size)
                    #elem.SetCenter(points.GetPoint(j))

                alt_grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
                j += 1
            elif element.type in ('CMASS1', 'CMASS2'):
                centroid = element.Centroid()
                #n1 = element.G1()
                #n2 = element.G2()
                #print('n1=%s n2=%s centroid=%s' % (n1, n2, centroid))
                points.InsertPoint(j, *centroid)

                elem = vtk.vtkVertex()
                elem.GetPointIds().SetId(0, j)
                alt_grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
                j += 1
            else:
                # CMASS3/CMASS4 (SPOINT masses) and anything else are skipped
                self.gui.log_info("skipping %s" % element.type)
        alt_grid.SetPoints(points)
def set_spc_mpc_suport_grid(self, model, nid_to_pid_map):
"""
for each subcase, make secondary actors including:
- spc_id=spc_id
- mpc_id=mpc_id (includes rigid elements)
- mpc_dependent_id=mpc_id (includes rigid elements)
- mpc_independent_id=mpc_id (includes rigid elements)
- suport_id=suport1_id (includes SUPORT/SUPORT1)
TODO: consider changing the varying ids to huh???
"""
spc_names = []
mpc_names = []
suport_names = []
#print('getting rigid')
rigid_lines = model._get_rigid()
spc_ids_used = set()
mpc_ids_used = set()
suport1_ids_used = set()
spc_to_subcase = defaultdict(list)
mpc_to_subcase = defaultdict(list)
#suport1_to_subcase = defaultdict(list)
for subcase_id, subcase in sorted(model.subcases.items()):
if 'SPC' in subcase:
spc_id = subcase.get_parameter('SPC')[0]
if spc_id is not None:
nspcs = model.card_count['SPC'] if 'SPC' in model.card_count else 0
nspc1s = model.card_count['SPC1'] if 'SPC1' in model.card_count else 0
nspcds = model.card_count['SPCD'] if 'SPCD' in model.card_count else 0
## TODO: this line seems too loose...
## TODO: why aren't SPCDs included?
if nspcs + nspc1s + nspcds:
spc_to_subcase[spc_id].append(subcase_id)
if 'MPC' in subcase:
mpc_id = subcase.get_parameter('MPC')[0]
if mpc_id is not None:
## TODO: this line seems too loose
nmpcs = model.card_count['MPC'] if 'MPC' in model.card_count else 0
if nmpcs:
mpc_to_subcase[mpc_id].append(subcase_id)
for spc_id in chain(model.spcs, model.spcadds):
spc_name = 'SPC=%i' % (spc_id)
if spc_id in mpc_to_subcase:
subcases = spc_to_subcase[spc_id]
spc_name += ': Subcases='
spc_name += ', '.join(str(subcase_id) for subcase_id in subcases)
spc_names += self._fill_spc(spc_id, spc_name, model, nid_to_pid_map)
for mpc_id in chain(model.mpcs, model.mpcadds):
depname = 'MPC=%i_dependent' % mpc_id
indname = 'MPC=%i_independent' % mpc_id
linename = 'MPC=%i_lines' % mpc_id
if mpc_id in mpc_to_subcase:
subcases = mpc_to_subcase[mpc_id]
mpc_name = ': Subcases='
mpc_name += ', '.join(str(subcase_id) for subcase_id in subcases)
depname += mpc_name
indname += mpc_name
linename += mpc_name
lines = get_mpc_node_ids(model, mpc_id, stop_on_failure=False)
lines2 = list(lines)
mpc_names += self._fill_dependent_independent(
mpc_id, model, lines2,
depname, indname, linename)
if 0: # pragma: no cover
for subcase_id, subcase in sorted(model.subcases.items()):
if 'SPC' in subcase:
spc_id = subcase.get_parameter('SPC')[0]
if spc_id is not None and spc_id not in spc_ids_used:
spc_ids_used.add(spc_id)
nspcs = model.card_count['SPC'] if 'SPC' in model.card_count else 0
nspc1s = model.card_count['SPC1'] if 'SPC1' in model.card_count else 0
nspcds = model.card_count['SPCD'] if 'SPCD' in model.card_count else 0
## TODO: this line seems too loose...
## TODO: why aren't SPCDs included?
if nspcs + nspc1s + nspcds:
spc_name = 'spc_id=%i' % spc_id
spc_names += self._fill_spc(spc_id, spc_name, model, nid_to_pid_map)
# rigid body elements and MPCs
if 'MPC' in subcase:
mpc_id = subcase.get_parameter('MPC')[0]
if mpc_id is not None and mpc_id not in mpc_ids_used:
mpc_ids_used.add(mpc_id)
## TODO: this line seems too loose
nmpcs = model.card_count['MPC'] if 'MPC' in model.card_count else 0
if nmpcs:
lines = get_mpc_node_ids(model, mpc_id, stop_on_failure=False)
lines2 = list(lines)
depname = 'mpc_id=%i_dependent' % mpc_id
indname = 'mpc_id=%i_independent' % mpc_id
linename = 'mpc_id=%i_lines' % mpc_id
mpc_names += self._fill_dependent_independent(
mpc_id, model, lines2,
depname, indname, linename)
# SUPORTs are node/dofs that deconstrained to allow rigid body motion
# SUPORT1s are subcase-specific SUPORT cards
if 'SUPORT1' in subcase.params: ## TODO: should this be SUPORT?
suport_id = subcase.get_parameter('SUPORT1')[0]
# TODO: is this line correct???
if 'SUPORT' in model.card_count or 'SUPORT1' in model.card_count:
# TODO: this "if block" seems unnecessary
if suport_id is not None and suport_id not in suport1_ids_used:
# SUPORT1 / SUPORT
suport1_ids_used.add(suport_id)
suport_name = self._fill_suport(suport_id, subcase_id, model)
suport_names.append(suport_name)
# create a SUPORT actor if there are no SUPORT1s
# otherwise, we already included it in suport_id=suport_id
if len(suport_names) == 0 and model.suport:
# handle SUPORT without SUPORT1
ids = []
for suport in model.suport:
idsi = suport.node_ids
ids += idsi
grid_name = 'SUPORT'
self.gui.create_alternate_vtk_grid(
grid_name, color=RED_FLOAT, opacity=1.0, point_size=4,
representation='point', is_visible=True)
if len(rigid_lines):
# handle RBEs without MPCs
mpc_id = 0
depname = 'rigid_dependent'
indname = 'rigid_independent'
linename = 'rigid_lines'
mpc_names += self._fill_dependent_independent(
mpc_id, model, rigid_lines,
depname, indname, linename)
geometry_names = spc_names + mpc_names + suport_names
return geometry_names
    def _fill_spc(self, spc_id, spc_name, model, nid_to_pid_map):
        """creates the spc secondary actors

        Builds one point actor named ``spc_name`` holding the constrained
        nodes of SPC set ``spc_id`` and returns ``[spc_name]``.
        """
        spc_names = [spc_name]
        self.gui.create_alternate_vtk_grid(
            spc_name, color=PURPLE_FLOAT, line_width=5, opacity=1.,
            point_size=5, representation='point', is_visible=False)

        # node_ids = model.get_SPCx_node_ids(spc_id)
        # nid -> constrained-component string (e.g. '123')
        node_ids_c1 = model.get_SPCx_node_ids_c1(
            spc_id, stop_on_failure=False)

        node_ids = []
        for nid, c1 in node_ids_c1.items():
            if nid_to_pid_map is not None:
                plot_node = False
                pids = nid_to_pid_map[nid]
                for pid in pids:
                    if pid == 0:
                        # CONROD
                        continue
                    if pid is None:
                        print('pid is None in _fill_spc...')
                        continue
                    if pid < 0:
                        print('pid=%s in _fill_spc...' % pid)
                        continue
                    prop = model.properties[pid]
                    # any non-solid property attached to the node makes the
                    # node worth plotting
                    if prop.type not in ['PSOLID', 'PLSOLID']:
                        plot_node = True
                if not plot_node:
                    # don't include 456 constraints if they're ONLY on solid elements
                    # if we had any bar/plate/etc. elements that use this node, we'll plot the node
                    if not('1' in c1 or '2' in c1 or '3' in c1):
                        continue
            node_ids.append(nid)

        node_ids = np.unique(node_ids)
        msg = ', which is required by %r' % spc_name
        self._add_nastran_nodes_to_grid(spc_name, node_ids, model, msg)
        return spc_names
def create_bar_pin_flag_text(self, unused_pin_flag=None):
"""
Lists the pin flag for each element (that has a pin flag)
self.nid_release_map is set by ``_fill_bar_yz``
TODO: needs a better interface in the gui
"""
nids = []
text = []
#result_name = self.icase
result_name = str('ElementID')
for nid, data in sorted(self.nid_release_map.items()):
sub_release_map = defaultdict(str)
for (eid, pin_flagi) in data:
sub_release_map[pin_flagi] += (str(eid) + ', ')
texti = '\n'.join(['%s-%s' % (pin_flagi, msg.rstrip(', '))
for (pin_flagi, msg) in sorted(sub_release_map.items())])
# super messy
#texti = ', '.join(['%s-%s' % (pin_flagi, eid) for (eid, pin_flagi) in data])
nids.append(nid)
text.append(texti)
self.gui.mark_nodes(nids, result_name, text)
    def _fill_bar_yz(self, unused_dim_max, model, icase, cases, form, debug=False):
        """
        plots the y, z vectors for CBAR & CBEAM elements

        Also creates the 'Bar Nodes' actor, one pair of <type>_y/<type>_z
        orientation-vector actors per bar type, and an 'is_<type>' centroid
        result per type.  Returns the next free ``icase``.
        """
        card_types = ['CBAR', 'CBEAM']
        out = model.get_card_ids_by_card_types(card_types=card_types)
        bar_beam_eids = out['CBAR'] + out['CBEAM']
        bar_pid_to_eids = get_beam_sections_map(model, bar_beam_eids)
        bar_nids = get_bar_nids(model, bar_beam_eids)
        #ugrid_temp = create_3d_beams(model, bar_pid_to_eids)

        self.bar_eids = {}
        self.bar_lines = {}
        if len(bar_beam_eids) == 0:
            return icase
        scale = 0.15

        # TODO: this should be reworked
        bar_nids, bar_types, nid_release_map = self._get_bar_yz_arrays(
            model, bar_beam_eids, bar_pid_to_eids,
            scale, debug)
        self.nid_release_map = nid_release_map

        bar_nids = list(bar_nids)
        self.gui.create_alternate_vtk_grid(
            'Bar Nodes', color=RED_FLOAT, line_width=1, opacity=1.,
            point_size=5, representation='point', bar_scale=0., is_visible=False)
        msg = ", which is required by 'Bar Nodes'"
        self._add_nastran_nodes_to_grid('Bar Nodes', bar_nids, model, msg)

        geo_form = form[2]
        bar_form = ('CBAR / CBEAM', None, [])
        #print('geo_form =', geo_form)
        #bar_types2 = {}

        # first pass: collect all bar eids so 'is_<type>' can distinguish
        # "some other bar type" (0) from "not a bar at all" (-1)
        bar_eids = []
        for bar_type, data in sorted(bar_types.items()):
            eids, lines_bar_y, lines_bar_z = data
            if len(eids):
                bar_eids.append(eids)
        ibars = 0
        if bar_eids:
            bar_eids = np.hstack(bar_eids)
            # NOTE(review): searchsorted assumes self.element_ids is sorted
            # and contains every bar eid — confirm upstream guarantees this
            ibars = np.searchsorted(self.element_ids, bar_eids)

        # second pass: build the y/z orientation actors + result per type
        for bar_type, data in sorted(bar_types.items()):
            eids, lines_bar_y, lines_bar_z = data
            if len(eids):
                if debug:  # pragma: no cover
                    print('bar_type = %r' % bar_type)
                    print('eids = %r' % eids)
                    print('all_eids = %r' % self.element_ids.tolist())
                # if bar_type not in ['ROD', 'TUBE']:
                bar_y = bar_type + '_y'
                bar_z = bar_type + '_z'

                self.gui.create_alternate_vtk_grid(
                    bar_y, color=GREEN_FLOAT, line_width=5, opacity=1.,
                    point_size=5, representation='bar', bar_scale=scale, is_visible=False)
                self.gui.create_alternate_vtk_grid(
                    bar_z, color=BLUE_FLOAT, line_width=5, opacity=1.,
                    point_size=5, representation='bar', bar_scale=scale, is_visible=False)

                self._add_nastran_lines_xyz_to_grid(bar_y, lines_bar_y, eids)
                self._add_nastran_lines_xyz_to_grid(bar_z, lines_bar_z, eids)

                # form = ['Geometry', None, []]
                i = np.searchsorted(self.element_ids, eids)
                is_type = np.full(self.element_ids.shape, -1, dtype='int32')
                is_type[ibars] = 0
                try:
                    is_type[i] = 1
                except:
                    #print('self.element_ids =', self.element_ids)
                    #print('eids =', eids)
                    # dump the eids that searchsorted pushed past the end
                    ii = np.where(i == len(self.element_ids))[0]
                    print('ii = %s' % ii)
                    print('failed eids =', eids[ii])
                    #assert self.element_ids[i] == eids
                    raise

                bar_form[2].append(['is_%s' % bar_type, icase, []])

                msg = 'is_%s' % bar_type
                type_res = GuiResult(0, header=msg, title=msg,
                                     location='centroid', scalar=is_type, mask_value=-1)
                cases[icase] = (type_res, (0, msg))
                icase += 1

        # print(geo_form)
        if len(bar_form[2]):
            geo_form.append(bar_form)
        return icase
def _add_nastran_lines_xyz_to_grid(self, name, lines, eids):
"""creates the bar orientation vector lines"""
nlines = len(lines)
nnodes = nlines * 2
if nlines == 0:
return
assert name != 'Bar Nodes', name
grid = self.gui.alt_grids[name]
bar_eids = np.asarray(eids, dtype='int32')
bar_lines = np.asarray(lines, dtype='float32').reshape(nlines, 6)
self.bar_eids[name] = bar_eids
self.bar_lines[name] = bar_lines
nodes = bar_lines.reshape(nlines * 2, 3)
points = numpy_to_vtk_points(nodes)
elements = np.arange(0, nnodes, dtype='int32').reshape(nlines, 2)
etype = 3 # vtk.vtkLine().GetCellType()
create_vtk_cells_of_constant_element_type(grid, elements, etype)
grid.SetPoints(points)
    def _fill_dependent_independent(self, unused_mpc_id, model, lines,
                                    depname, indname, linename):
        """creates the mpc actors

        Builds three actors from (dependent_nid, independent_nid) pairs:
        dependent points, independent points, and the connecting lines.
        Returns the three actor names (empty list if there are no lines).
        """
        if not lines:
            return []

        self.gui.create_alternate_vtk_grid(
            depname, color=GREEN_FLOAT, line_width=5, opacity=1.,
            point_size=5, representation='point', is_visible=False)
        self.gui.create_alternate_vtk_grid(
            indname, color=LIGHT_GREEN_FLOAT, line_width=5, opacity=1.,
            point_size=5, representation='point', is_visible=False)
        self.gui.create_alternate_vtk_grid(
            linename, color=LIGHT_GREEN_FLOAT, line_width=5, opacity=1.,
            point_size=5, representation='wire', is_visible=False)

        # order-preserving dedup; O(n^2), but the pairs may be unhashable
        # (lists), so a set can't be used here
        lines2 = []
        for line in lines:
            if line not in lines2:
                lines2.append(line)
        lines = np.array(lines2, dtype='int32')
        dependent = (lines[:, 0])
        independent = np.unique(lines[:, 1])
        self.dependents_nodes.update(dependent)
        unused_node_ids = np.unique(lines.ravel())

        msg = ', which is required by %r' % depname
        self._add_nastran_nodes_to_grid(depname, dependent, model, msg)

        msg = ', which is required by %r' % indname
        self._add_nastran_nodes_to_grid(indname, independent, model, msg)

        msg = ', which is required by %r' % linename
        self._add_nastran_lines_to_grid(linename, lines, model)

        mpc_names = [depname, indname, linename]
        return mpc_names
    def _add_nastran_nodes_to_grid(self, name, node_ids, model, msg, store_msg=False):
        """used to create MPC independent/dependent nodes

        Fills the alternate grid ``name`` with one vertex per node id.
        Missing nodes are collected into a warning; with ``store_msg=True``
        the warning text is returned instead of (only) logged.
        """
        nnodes = len(node_ids)
        # starts as an empty list; reassigned to a string below if any
        # nodes were missing (both are falsy/truthy-compatible)
        stored_msg = []
        if nnodes == 0:
            msg = '0 nodes added for %r' % name
            out_msg = store_warning(model.log, store_msg, msg)
            return out_msg
        # track the nodes so deformed results can move this actor
        self.gui.follower_nodes[name] = node_ids

        #numpy_to_vtk_points(nodes)
        points = vtk.vtkPoints()
        points.SetNumberOfPoints(nnodes)

        j = 0  # index of the next point actually inserted
        nid_map = self.gui.nid_map
        alt_grid = self.gui.alt_grids[name]
        missing_nodes = []
        for nid in sorted(node_ids):
            try:
                unused_i = nid_map[nid]
            except KeyError:
                missing_nodes.append(str(nid))
                continue

            if nid not in model.nodes:
                # I think this hits for SPOINTs
                missing_nodes.append(str(nid))
                continue

            # point = self.grid.GetPoint(i)
            # points.InsertPoint(j, *point)

            node = model.nodes[nid]
            point = node.get_position()
            points.InsertPoint(j, *point)

            #if 1:
            elem = vtk.vtkVertex()
            elem.GetPointIds().SetId(0, j)
            #else:
                #elem = vtk.vtkSphere()
                #dim_max = 1.0
                #sphere_size = self._get_sphere_size(dim_max)
                #elem.SetRadius(sphere_size)
                #elem.SetCenter(points.GetPoint(j))

            alt_grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
            j += 1
        out_msg = ''
        if missing_nodes:
            stored_msg = 'nids=[%s] do not exist%s' % (', '.join(missing_nodes), msg)
        alt_grid.SetPoints(points)
        if stored_msg:
            out_msg = store_warning(model.log, store_msg, stored_msg)
        return out_msg
def _add_nastran_spoints_to_grid(self, spoints, nid_map):
"""used to create SPOINTs"""
if not spoints:
return
spoint_ids = list(spoints.keys())
assert isinstance(spoint_ids, list), type(spoint_ids)
nspoints = len(spoint_ids)
name = 'SPoints'
if nspoints == 0:
self.log.warning('0 spoints added for %r' % name)
return
self.gui.create_alternate_vtk_grid(
name, color=BLUE_FLOAT, line_width=1, opacity=1.,
point_size=5, representation='point', bar_scale=0., is_visible=True)
self.gui.follower_nodes[name] = spoint_ids
points = vtk.vtkPoints()
points.SetNumberOfPoints(nspoints)
j = 0
alt_grid = self.gui.alt_grids[name]
for spointi in sorted(spoint_ids):
try:
unused_i = nid_map[spointi]
except KeyError:
self.log.warning('spointi=%s does not exist' % spointi)
continue
if spointi not in spoints:
self.log.warning('spointi=%s doesnt exist' % spointi)
continue
# point = self.grid.GetPoint(i)
# points.InsertPoint(j, *point)
points.InsertPoint(j, 0., 0., 0.)
elem = vtk.vtkVertex()
elem.GetPointIds().SetId(0, j)
alt_grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
j += 1
alt_grid.SetPoints(points)
def _add_nastran_lines_to_grid(self, name, lines, model, nid_to_pid_map=None):
"""used to create MPC lines"""
nlines = lines.shape[0]
#nids = np.unique(lines)
#nnodes = len(nids)
nnodes = nlines * 2
if nnodes == 0:
return
self.gui.follower_nodes[name] = lines.ravel()
points = vtk.vtkPoints()
points.SetNumberOfPoints(nnodes)
j = 0
etype = 3 # vtkLine
nid_map = self.gui.nid_map
alt_grid = self.gui.alt_grids[name]
for nid1, nid2 in lines:
try:
unused_i1 = nid_map[nid1]
except KeyError:
model.log.warning('nid=%s does not exist' % nid1)
continue
try:
unused_i2 = nid_map[nid2]
except KeyError:
model.log.warning('nid=%s does not exist' % nid2)
continue
if nid1 not in model.nodes or nid2 not in model.nodes:
continue
node = model.nodes[nid1]
point = node.get_position()
points.InsertPoint(j, *point)
node = model.nodes[nid2]
point = node.get_position()
points.InsertPoint(j + 1, *point)
elem = vtk.vtkLine()
point_ids = elem.GetPointIds()
point_ids.SetId(0, j)
point_ids.SetId(1, j + 1)
alt_grid.InsertNextCell(etype, point_ids)
j += 2
alt_grid.SetPoints(points)
def _fill_suport(self, suport_id, unused_subcase_id, model):
"""creates SUPORT and SUPORT1 nodes"""
suport_name = 'suport1_id=%i' % suport_id
self.gui.create_alternate_vtk_grid(
suport_name, color=RED_FLOAT, line_width=5, opacity=1., point_size=4,
representation='point', is_visible=False)
suport_nids = get_suport_node_ids(model, suport_id)
msg = ', which is required by %r' % suport_name
self._add_nastran_nodes_to_grid(suport_name, suport_nids, model, msg)
return suport_name
def _get_sphere_size(self, dim_max):
return 0.01 * dim_max
    def _map_elements3(self, nid_map, model, unused_j, unused_dim_max,
                       nid_cp_cd, xref_loads=True):
        """
        Much, much faster way to add elements that directly builds the VTK objects
        rather than using for loops.

        Parameters
        ----------
        nid_map : dict[nid] : nid_index
            maps a node id to its index in xyz_cid0
        model : BDF()
            the model object
        nid_cp_cd : (nnodes, 3) int ndarray
            the (node_id, Cp, Cd) columns; column 0 is used with
            np.searchsorted, so it is assumed sorted — TODO confirm with caller
        xref_loads : bool; default=True
            unused in this method (kept for signature compatibility)

        Returns
        -------
        nid_to_pid_map : dict
            node to property id map
            used to show SPC constraints (we don't want to show constraints on 456 DOFs)
        icase : int
            the result number
        cases : dict
            the GuiResult objects
        form : List[???, ???, ???]
            the Results sidebar data

        TODO: Not quite done on:
         - ???
        """
        settings = self.gui.settings # type: Settings
        # face index tuples (0-based) used for solid-element quality checks;
        # the trailing comments show the 1-based Nastran node numbering
        # these normals point inwards
        #      4
        #    / | \
        #   /  |  \
        #  3-------2
        #   \  |  /
        #    \ | /
        #      1
        _ctetra_faces = (
            (0, 1, 2), # (1, 2, 3),
            (0, 3, 1), # (1, 4, 2),
            (0, 3, 2), # (1, 3, 4),
            (1, 3, 2), # (2, 4, 3),
        )
        # these normals point inwards
        #
        #
        #
        #
        #        /4-----3
        #       /       /
        #      /  5    /
        #    /    \   /
        #   /      \ /
        # 1---------2
        _cpyram_faces = (
            (0, 1, 2, 3), # (1, 2, 3, 4),
            (1, 4, 2), # (2, 5, 3),
            (2, 4, 3), # (3, 5, 4),
            (0, 3, 4), # (1, 4, 5),
            (0, 4, 1), # (1, 5, 2),
        )
        # these normals point inwards
        #       /6
        #     /  | \
        #   /    |   \
        # 3\     |     \
        # |  \   /4-----5
        # |    \/       /
        # |   /  \     /
        # |  /    \   /
        # | /      \ /
        # 1---------2
        _cpenta_faces = (
            (0, 2, 1), # (1, 3, 2),
            (3, 4, 5), # (4, 5, 6),
            (0, 1, 4, 3), # (1, 2, 5, 4), # bottom
            (1, 2, 5, 4), # (2, 3, 6, 5), # right
            (0, 3, 5, 2), # (1, 4, 6, 3), # left
        )
        # these normals point inwards
        #  8----7
        # /|   /|
        # / | / |
        # / 5-/--6
        # 4-----3   /
        # |  /  |  /
        # | /   | /
        # 1-----2
        _chexa_faces = (
            (4, 5, 6, 7), # (5, 6, 7, 8),
            (0, 3, 2, 1), # (1, 4, 3, 2),
            (1, 2, 6, 5), # (2, 3, 7, 6),
            (2, 3, 7, 6), # (3, 4, 8, 7),
            (0, 4, 7, 3), # (1, 5, 8, 4),
            (0, 6, 5, 4), # (1, 7, 6, 5),
        )
        elements, nelements, unused_superelements = get_elements_nelements_unvectorized(model)
        xyz_cid0 = self.xyz_cid0
        # per-element output arrays, filled in the main loop below
        pids_array = np.zeros(nelements, dtype='int32')
        eids_array = np.zeros(nelements, dtype='int32')
        mcid_array = np.full(nelements, -1, dtype='int32')
        material_theta_array = np.full(nelements, np.nan, dtype='float32')
        dim_array = np.full(nelements, -1, dtype='int32')
        nnodes_array = np.full(nelements, -1, dtype='int32')
        # quality
        min_interior_angle = np.zeros(nelements, 'float32')
        max_interior_angle = np.zeros(nelements, 'float32')
        dideal_theta = np.zeros(nelements, 'float32')
        max_skew_angle = np.zeros(nelements, 'float32')
        max_warp_angle = np.zeros(nelements, 'float32')
        max_aspect_ratio = np.zeros(nelements, 'float32')
        area = np.zeros(nelements, 'float32')
        area_ratio = np.zeros(nelements, 'float32')
        taper_ratio = np.zeros(nelements, 'float32')
        min_edge_length = np.zeros(nelements, 'float32')
        normals = np.full((nelements, 3), np.nan, 'float32')
        nids_list = []
        ieid = 0
        cell_offset = 0
        dtype = get_numpy_idtype_for_vtk()
        cell_types_array = np.zeros(nelements, dtype=dtype)
        cell_offsets_array = np.zeros(nelements, dtype=dtype)
        # cache each VTK cell type id once instead of per element
        cell_type_point = vtk.vtkVertex().GetCellType()
        cell_type_line = vtk.vtkLine().GetCellType()
        cell_type_tri3 = vtkTriangle().GetCellType()
        cell_type_tri6 = vtkQuadraticTriangle().GetCellType()
        cell_type_quad4 = vtkQuad().GetCellType()
        #cell_type_quad8 = vtkQuadraticQuad().GetCellType()
        cell_type_tetra4 = vtkTetra().GetCellType()
        cell_type_tetra10 = vtkQuadraticTetra().GetCellType()
        cell_type_pyram5 = vtkPyramid().GetCellType()
        #cell_type_pyram13 = vtk.vtkQuadraticPyramid().GetCellType()
        cell_type_penta6 = vtkWedge().GetCellType()
        cell_type_penta15 = vtkQuadraticWedge().GetCellType()
        cell_type_hexa8 = vtkHexahedron().GetCellType()
        cell_type_hexa20 = vtkQuadraticHexahedron().GetCellType()
        # per gui/testing_methods.py/create_vtk_cells_of_constant_element_type
        #1  = vtk.vtkVertex().GetCellType()
        #3  = vtkLine().GetCellType()
        #5  = vtkTriangle().GetCellType()
        #9  = vtk.vtkQuad().GetCellType()
        #10 = vtkTetra().GetCellType()
        #vtkPenta().GetCellType()
        #vtkHexa().GetCellType()
        #vtkPyram().GetCellType()
        skipped_etypes = set()
        all_nids = nid_cp_cd[:, 0]
        ieid = 0
        for eid, elem in sorted(elements.items()):
            if ieid % 5000 == 0 and ieid > 0:
                print(' map_elements = %i' % ieid)
            etype = elem.type
            # reset per-element values; NaN means "not applicable" for quality
            nnodes = None
            nids = None
            pid = None
            cell_type = None
            inids = None
            dideal_thetai = np.nan
            min_thetai = np.nan
            max_thetai = np.nan
            #max_thetai = np.nan
            max_skew = np.nan
            max_warp = np.nan
            aspect_ratio = np.nan
            areai = np.nan
            area_ratioi = np.nan
            taper_ratioi = np.nan
            min_edge_lengthi = np.nan
            normali = np.nan
            if etype in ['CTRIA3', 'CTRIAR', 'CTRAX3', 'CPLSTN3', 'CPLSTS3']:
                nids = elem.nodes
                pid = elem.pid
                cell_type = cell_type_tri3 # 5
                inids = np.searchsorted(all_nids, nids)
                p1, p2, p3 = xyz_cid0[inids, :]
                out = tri_quality(p1, p2, p3)
                (areai, max_skew, aspect_ratio,
                 min_thetai, max_thetai, dideal_thetai, min_edge_lengthi) = out
                normali = np.cross(p1 - p2, p1 - p3)
                if isinstance(elem.theta_mcid, float):
                    material_theta_array[ieid] = elem.theta_mcid
                else:
                    mcid_array[ieid] = elem.theta_mcid
                nnodes = 3
                dim = 2
            elif etype in ['CQUAD4', 'CQUADR', 'CPLSTN4', 'CPLSTS4', 'CQUADX4']:
                nids = elem.nodes
                pid = elem.pid
                cell_type = cell_type_quad4 #9
                inids = np.searchsorted(all_nids, nids)
                p1, p2, p3, p4 = xyz_cid0[inids, :]
                out = quad_quality(elem, p1, p2, p3, p4)
                (areai, taper_ratioi, area_ratioi, max_skew, aspect_ratio,
                 min_thetai, max_thetai, dideal_thetai, min_edge_lengthi, max_warp) = out
                normali = np.cross(p1 - p3, p2 - p4)
                if isinstance(elem.theta_mcid, float):
                    material_theta_array[ieid] = elem.theta_mcid
                else:
                    mcid_array[ieid] = elem.theta_mcid
                nnodes = 4
                dim = 2
            elif etype == 'CTRIA6':
                nids = elem.nodes
                pid = elem.pid
                if None in nids:
                    # degenerate CTRIA6 -> draw the 3 corner nodes only
                    cell_type = cell_type_tri3
                    inids = np.searchsorted(all_nids, nids[:3])
                    nids = nids[:3]
                    p1, p2, p3 = xyz_cid0[inids, :]
                    nnodes = 3
                else:
                    cell_type = cell_type_tri6
                    inids = np.searchsorted(all_nids, nids)
                    p1, p2, p3, p4, unused_p5, unused_p6 = xyz_cid0[inids, :]
                    nnodes = 6
                out = tri_quality(p1, p2, p3)
                (areai, max_skew, aspect_ratio,
                 min_thetai, max_thetai, dideal_thetai, min_edge_lengthi) = out
                normali = np.cross(p1 - p2, p1 - p3)
                if isinstance(elem.theta_mcid, float):
                    material_theta_array[ieid] = elem.theta_mcid
                else:
                    mcid_array[ieid] = elem.theta_mcid
                dim = 2
            elif etype == 'CQUAD8':
                nids = elem.nodes
                pid = elem.pid
                if None in nids:
                    # NOTE(review): degenerate CQUAD8 uses the tri3 cell type
                    # with 4 corner nodes; quad4 seems more natural — confirm
                    cell_type = cell_type_tri3
                    inids = np.searchsorted(all_nids, nids[:4])
                    nids = nids[:4]
                    p1, p2, p3, p4 = xyz_cid0[inids, :]
                    nnodes = 4
                else:
                    # NOTE(review): uses the 6-noded tri cell type for an
                    # 8-noded quad; cell_type_quad8 is commented out above — confirm
                    cell_type = cell_type_tri6
                    inids = np.searchsorted(all_nids, nids)
                    p1, p2, p3, p4 = xyz_cid0[inids[:4], :]
                    nnodes = 8
                out = quad_quality(elem, p1, p2, p3, p4)
                (areai, taper_ratioi, area_ratioi, max_skew, aspect_ratio,
                 min_thetai, max_thetai, dideal_thetai, min_edge_lengthi, max_warp) = out
                normali = np.cross(p1 - p3, p2 - p4)
                if isinstance(elem.theta_mcid, float):
                    material_theta_array[ieid] = elem.theta_mcid
                else:
                    mcid_array[ieid] = elem.theta_mcid
                # NOTE(review): this resets nnodes to 4, overriding the 8 set
                # in the else branch above — confirm intended
                nnodes = 4
                dim = 2
            elif etype == 'CSHEAR':
                nids = elem.nodes
                pid = elem.pid
                cell_type = cell_type_quad4 #9
                inids = np.searchsorted(all_nids, nids)
                p1, p2, p3, p4 = xyz_cid0[inids, :]
                out = quad_quality(elem, p1, p2, p3, p4)
                (areai, taper_ratioi, area_ratioi, max_skew, aspect_ratio,
                 min_thetai, max_thetai, dideal_thetai, min_edge_lengthi, max_warp) = out
                normali = np.cross(p1 - p3, p2 - p4)
                nnodes = 4
                dim = 2
            elif etype == 'CTETRA':
                nids = elem.nodes
                pid = elem.pid
                if None in nids:
                    cell_type = cell_type_tetra4
                    nids = nids[:4]
                    nnodes = 4
                else:
                    cell_type = cell_type_tetra10
                    nnodes = 10
                inids = np.searchsorted(all_nids, nids)
                min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(
                    _ctetra_faces, nids, nid_map, xyz_cid0)
                dim = 3
            elif etype == 'CHEXA':
                nids = elem.nodes
                pid = elem.pid
                if None in nids:
                    cell_type = cell_type_hexa8
                    nids = nids[:8]
                    nnodes = 8
                else:
                    cell_type = cell_type_hexa20
                    nnodes = 20
                inids = np.searchsorted(all_nids, nids)
                min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(
                    _chexa_faces, nids, nid_map, xyz_cid0)
                dim = 3
            elif etype == 'CPENTA':
                nids = elem.nodes
                pid = elem.pid
                if None in nids:
                    cell_type = cell_type_penta6
                    nids = nids[:6]
                    nnodes = 6
                else:
                    cell_type = cell_type_penta15
                    nnodes = 15
                inids = np.searchsorted(all_nids, nids)
                min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(
                    _cpenta_faces, nids, nid_map, xyz_cid0)
                dim = 3
            elif etype == 'CPYRAM':
                # TODO: assuming 5
                nids = elem.nodes
                pid = elem.pid
                if None in nids:
                    cell_type = cell_type_pyram5
                    nids = nids[:5]
                    nnodes = 5
                else:
                    # NOTE(review): quadratic CPYRAM uses the penta15 cell
                    # type; cell_type_pyram13 is commented out above — confirm
                    cell_type = cell_type_penta15
                    nnodes = 15
                inids = np.searchsorted(all_nids, nids)
                min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(
                    _cpyram_faces, nids, nid_map, xyz_cid0)
                dim = 3
            elif etype in ['CELAS2', 'CELAS4', 'CDAMP4']:
                # these can have empty nodes and have no property
                # CELAS1: 1/2 GRID/SPOINT and pid
                # CELAS2: 1/2 GRID/SPOINT, k, ge, and s
                # CELAS3: 1/2 SPOINT and pid
                # CELAS4: 1/2 SPOINT and k
                nids = elem.nodes
                assert nids[0] != nids[1]
                if None in nids:
                    assert nids[0] is not None, nids
                    assert nids[1] is None, nids
                    nids = [nids[0]]
                    cell_type = cell_type_point
                    nnodes = 1
                else:
                    nids = elem.nodes
                    assert nids[0] != nids[1]
                    cell_type = cell_type_line
                    nnodes = 2
                inids = np.searchsorted(all_nids, nids)
                pid = 0
                dim = 0
            elif etype in ['CBUSH', 'CBUSH1D', 'CBUSH2D',
                           'CELAS1', 'CELAS3',
                           'CDAMP1', 'CDAMP2', 'CDAMP3', 'CDAMP5',
                           'CFAST', 'CGAP', 'CVISC']:
                nids = elem.nodes
                assert nids[0] != nids[1]
                assert None not in nids, 'nids=%s\n%s' % (nids, elem)
                pid = elem.pid
                cell_type = cell_type_line
                inids = np.searchsorted(all_nids, nids)
                nnodes = 2
                dim = 0
            elif etype in ['CBAR', 'CBEAM']:
                nids = elem.nodes
                pid = elem.pid
                pid_ref = model.Property(pid)
                areai = pid_ref.Area()
                cell_type = cell_type_line
                inids = np.searchsorted(all_nids, nids)
                p1, p2 = xyz_cid0[inids, :]
                min_edge_lengthi = norm(p2 - p1)
                nnodes = 2
                dim = 1
            elif etype in ['CROD', 'CTUBE']:
                nids = elem.nodes
                pid = elem.pid
                pid_ref = model.Property(pid)
                areai = pid_ref.Area()
                cell_type = cell_type_line
                inids = np.searchsorted(all_nids, nids)
                p1, p2 = xyz_cid0[inids, :]
                min_edge_lengthi = norm(p2 - p1)
                nnodes = 2
                dim = 1
            elif etype == 'CONROD':
                nids = elem.nodes
                areai = elem.Area()
                pid = 0
                cell_type = cell_type_line
                inids = np.searchsorted(all_nids, nids)
                p1, p2 = xyz_cid0[inids, :]
                min_edge_lengthi = norm(p2 - p1)
                nnodes = 2
                dim = 1
            #------------------------------
            # rare
            #elif etype == 'CIHEX1':
                #nids = elem.nodes
                #pid = elem.pid
                #cell_type = cell_type_hexa8
                #inids = np.searchsorted(all_nids, nids)
                #min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(
                    #_chexa_faces, nids, nid_map, xyz_cid0)
                #nnodes = 8
                #dim = 3
            elif etype == 'CHBDYE':
                # thermal surface element: draw the referenced face of the
                # underlying solid element
                #self.eid_map[eid] = ieid
                eid_solid = elem.eid2
                side = elem.side
                element_solid = model.elements[eid_solid]
                mapped_inids = SIDE_MAP[element_solid.type][side]
                side_inids = [nid - 1 for nid in mapped_inids]
                nodes = element_solid.node_ids
                pid = 0
                nnodes = len(side_inids)
                nids = [nodes[inid] for inid in side_inids]
                inids = np.searchsorted(all_nids, nids)
                if len(side_inids) == 4:
                    cell_type = cell_type_quad4
                else:
                    # only quad faces are implemented so far
                    msg = 'element_solid:\n%s' % (str(element_solid))
                    msg += 'mapped_inids = %s\n' % mapped_inids
                    msg += 'side_inids = %s\n' % side_inids
                    msg += 'nodes = %s\n' % nodes
                    #msg += 'side_nodes = %s\n' % side_nodes
                    raise NotImplementedError(msg)
            elif etype == 'GENEL':
                # general element; drawn as a line between its first two nodes
                nids = []
                if len(elem.ul_nodes):
                    nids.append(elem.ul_nodes)
                if len(elem.ud_nodes):
                    nids.append(elem.ud_nodes)
                nids = np.unique(np.hstack(nids))
                #print(elem.get_stats())
                nids = nids[:2]
                areai = np.nan
                pid = 0
                cell_type = cell_type_line
                inids = np.searchsorted(all_nids, nids)
                p1, p2 = xyz_cid0[inids, :]
                min_edge_lengthi = norm(p2 - p1)
                nnodes = len(nids)
                dim = 1
            else:
                # unhandled element type: drop it from the grid and shrink
                # the element count so the arrays stay consistent
                #raise NotImplementedError(elem)
                skipped_etypes.add(etype)
                nelements -= 1
                continue
            #for nid in nids:
                #assert isinstance(nid, integer_types), 'not an integer.  nids=%s\n%s' % (nids, elem)
                #assert nid != 0, 'not a positive integer.  nids=%s\n%s' % (nids, elem)
            assert inids is not None
            if not np.array_equal(all_nids[inids], nids):
                msg = 'all_nids[inids]=%s nids=%s\n%s' % (all_nids[inids], nids, elem)
                raise RuntimeError(msg)
            assert cell_type is not None
            assert cell_offset is not None
            assert eid is not None
            assert pid is not None
            assert dim is not None
            assert nnodes is not None
            # vtk cell connectivity is [nnodes, n0, n1, ...] per cell
            nids_list.append(nnodes)
            nids_list.extend(inids)
            normals[ieid] = normali
            eids_array[ieid] = eid
            pids_array[ieid] = pid
            dim_array[ieid] = dim
            cell_types_array[ieid] = cell_type
            cell_offsets_array[ieid] = cell_offset # I assume the problem is here
            cell_offset += nnodes + 1
            self.eid_map[eid] = ieid
            min_interior_angle[ieid] = min_thetai
            max_interior_angle[ieid] = max_thetai
            dideal_theta[ieid] = dideal_thetai
            max_skew_angle[ieid] = max_skew
            max_warp_angle[ieid] = max_warp
            max_aspect_ratio[ieid] = aspect_ratio
            area[ieid] = areai
            area_ratio[ieid] = area_ratioi
            taper_ratio[ieid] = taper_ratioi
            min_edge_length[ieid] = min_edge_lengthi
            ieid += 1
        #print('self.eid_map =', self.eid_map)
        # drop the trailing unfilled slots left by skipped element types
        icells_zero = np.where(cell_types_array == 0)[0]
        # TODO: I'd like to get rid of deep=1, but it'll crash the edges
        deep = 1
        if len(icells_zero):
            icells = np.where(cell_types_array != 0)[0]
            if len(icells) == 0:
                self.log.error('skipped_etypes = %s' % skipped_etypes)
                raise RuntimeError('there are no elements...')
            eids_array = eids_array[icells]
            pids_array = pids_array[icells]
            #dim_array = pids_array[dim_array]
            cell_types_array = cell_types_array[icells]
            cell_offsets_array = cell_offsets_array[icells]
            nnodes_array = nnodes_array[icells]
            normals = normals[icells, :]
            #deep = 1
        #print('deep = %s' % deep)
        if skipped_etypes:
            self.log.error('skipped_etypes = %s' % list(skipped_etypes))
            #print('skipped_etypes = %s' % skipped_etypes)
        if len(pids_array) != nelements:
            msg = 'nelements=%s len(pids_array)=%s' % (nelements, len(pids_array))
            raise RuntimeError(msg)
        if len(cell_offsets_array) != nelements:
            msg = 'nelements=%s len(cell_offsets_array)=%s' % (nelements, len(cell_offsets_array))
            raise RuntimeError(msg)
        nids_array = np.array(nids_list, dtype=dtype)
        #-----------------------------------------------------------------
        # saving some data members
        self.element_ids = eids_array
        #print('cell_types_array* = ', cell_types_array.tolist())
        #print('cell_offsets_array* = ', cell_offsets_array.tolist())
        #-----------------------------------------------------------------
        # build the grid
        #self.log.info('nids_array = %s' % nids_array)
        #self.log.info('cell_offsets_array = %s' % cell_offsets_array)
        #self.log.info('cell_types_array = %s' % cell_types_array)
        # Create the array of cells
        cells_id_type = numpy_to_vtkIdTypeArray(nids_array, deep=1)
        vtk_cells = vtk.vtkCellArray()
        vtk_cells.SetCells(nelements, cells_id_type)
        # Cell types
        vtk_cell_types = numpy_to_vtk(
            cell_types_array, deep=deep,
            array_type=vtk.vtkUnsignedCharArray().GetDataType())
        vtk_cell_offsets = numpy_to_vtk(cell_offsets_array, deep=deep,
                                        array_type=vtk.VTK_ID_TYPE)
        grid = self.grid
        #grid = vtk.vtkUnstructuredGrid()
        grid.SetCells(vtk_cell_types, vtk_cell_offsets, vtk_cells)
        #-----------------------------------------------------------------
        # fill the results
        nid_to_pid_map = None
        self.isubcase_name_map = {1: ['Nastran', '']}
        icase = 0
        cases = OrderedDict()
        form = ['Geometry', None, []]
        form0 = form[2]
        subcase_id = 0
        #nids_set = True
        #if nids_set:
        # this intentionally makes a deepcopy
        #nids = np.array(nid_cp_cd[:, 0])
        # this intentionally makes a deepcopy
        cds = np.array(nid_cp_cd[:, 2])
        colormap = settings.colormap
        nid_res = GuiResult(subcase_id, 'NodeID', 'NodeID', 'node', all_nids,
                            mask_value=0,
                            nlabels=None,
                            labelsize=None,
                            ncolors=None,
                            colormap=colormap,
                            data_format=None,
                            uname='GuiResult')
        cases[icase] = (nid_res, (0, 'Node ID'))
        form0.append(('Node ID', icase, []))
        icase += 1
        # only show NodeCd when at least one node has a non-default Cd
        if cds.max() > 0:
            cd_res = GuiResult(0, header='NodeCd', title='NodeCd',
                               location='node', scalar=cds)
            cases[icase] = (cd_res, (0, 'NodeCd'))
            form0.append(('NodeCd', icase, []))
            icase += 1
        eid_res = GuiResult(subcase_id, 'ElementID', 'ElementID', 'centroid', eids_array,
                            mask_value=0,
                            nlabels=None,
                            labelsize=None,
                            ncolors=None,
                            colormap=colormap,
                            data_format=None,
                            uname='GuiResult')
        cases[icase] = (eid_res, (0, 'ElementID'))
        form0.append(('ElementID', icase, []))
        icase += 1
        is_element_dim = True
        #if len(np.unique(dim_array)) > 1:
            #dim_res = GuiResult(subcase_id, 'ElementDim', 'ElementDim', 'centroid', dim_array,
                                #mask_value=-1,
                                #nlabels=None,
                                #labelsize=None,
                                #ncolors=None,
                                #colormap=colormap,
                                #data_format=None,
                                #uname='GuiResult')
            #cases[icase] = (dim_res, (0, 'ElementDim'))
            #form0.append(('ElementDim', icase, []))
            #icase += 1
        if nnodes_array.max() > -1:
            nnodes_res = GuiResult(subcase_id, 'NNodes/Elem', 'NNodes/Elem',
                                   'centroid', nnodes_array,
                                   mask_value=0,
                                   nlabels=None,
                                   labelsize=None,
                                   ncolors=None,
                                   colormap=colormap,
                                   data_format=None,
                                   uname='GuiResult')
            cases[icase] = (nnodes_res, (0, 'NNodes/Elem'))
            form0.append(('NNodes/Elem', icase, []))
            icase += 1
        #pid_res = GuiResult(subcase_id, 'PropertyID', 'PropertyID', 'centroid', pids_array,
                            #mask_value=0,
                            #nlabels=None,
                            #labelsize=None,
                            #ncolors=None,
                            #colormap=colormap,
                            #data_format=None,
                            #uname='GuiResult')
        #cases[icase] = (pid_res, (0, 'PropertyID'))
        #form0.append(('PropertyID', icase, []))
        #icase += 1
        if len(model.properties) and nelements and settings.nastran_is_properties:
            icase, upids, pcomp, pshell, is_pshell_pcomp = self._build_properties(
                model, nelements, eids_array, pids_array, cases, form0, icase)
            icase = _build_materials(model, pcomp, pshell, is_pshell_pcomp,
                                     cases, form0, icase)
            try:
                icase = _build_optimization(model, pids_array, upids,
                                            nelements, cases, form0, icase)
            except:
                # optimization results are best-effort; log and keep going
                #raise
                s = StringIO()
                traceback.print_exc(file=s)
                sout = s.getvalue()
                self.gui.log_error(sout)
                print(sout)
        #if isgreater_int(mcid_array, -1):
            #mcid_res = GuiResult(subcase_id, 'Material Coordinate System', 'MaterialCoord',
                                 #'centroid', mcid_array,
                                 #mask_value=-1,
                                 #nlabels=None,
                                 #labelsize=None,
                                 #ncolors=None,
                                 #colormap=colormap,
                                 #data_format=None,
                                 #uname='GuiResult')
            #cases[icase] = (mcid_res, (0, 'Material Coordinate System'))
            #form0.append(('Material Coordinate System', icase, []))
            #icase += 1
        #if np.isfinite(theta_array).any():
            #print('np.nanmax(theta_array) =', np.nanmax(theta_array))
            #theta_res = GuiResult(subcase_id, 'Theta', 'Theta', 'centroid', theta_array,
                                  #mask_value=None,
                                  #nlabels=None,
                                  #labelsize=None,
                                  #ncolors=None,
                                  #colormap=colormap,
                                  #data_format=None,
                                  #uname='GuiResult')
            #cases[icase] = (theta_res, (0, 'Theta'))
            #form0.append(('Theta', icase, []))
            #icase += 1
        normal_mag = underflow_norm(normals, axis=1)
        assert len(normal_mag) == nelements
        normals /= normal_mag.reshape(nelements, 1)
        # NOTE(review): despite the name, this flags entries that ARE NaN
        i_not_nan = np.isnan(normal_mag)
        #if self.make_offset_normals_dim and nelements:
            #material_coord = None
            #icase, normals = _build_normals_quality(
                #model, self.gui.eid_map, nelements, cases, form0, icase,
                #xyz_cid0, material_coord, material_theta,
                #min_interior_angle, max_interior_angle, dideal_theta,
                #area, max_skew_angle, taper_ratio,
                #max_warp_angle, area_ratio, min_edge_length, max_aspect_ratio,
                #make_offset_normals_dim=self.make_offset_normals_dim)
            #self.normals = normals
        #----------------------------------------------------------
        # a shell exists if at least one element has a finite normal magnitude
        is_shell = False
        if False in i_not_nan:
            #max_normal = np.nanmax(normal_mag[i_not_nan])
            #is_shell = np.abs(max_normal) > 0.
            is_shell = True
        is_solid = isfinite_and_nonzero(max_interior_angle)
        #print('is_shell=%s is_solid=%s' % (is_shell, is_solid))
        if is_shell:
            nx_res = GuiResult(
                0, header='NormalX', title='NormalX',
                location='centroid', scalar=normals[:, 0], data_format='%.2f')
            ny_res = GuiResult(
                0, header='NormalY', title='NormalY',
                location='centroid', scalar=normals[:, 1], data_format='%.2f')
            nz_res = GuiResult(
                0, header='NormalZ', title='NormalZ',
                location='centroid', scalar=normals[:, 2], data_format='%.2f')
            nxyz_res = NormalResult(0, 'Normals', 'Normals',
                                    nlabels=2, labelsize=5, ncolors=2,
                                    colormap=colormap, data_format='%.1f',
                                    uname='NormalResult')
            area_res = GuiResult(0, header='Area', title='Area',
                                 location='centroid', scalar=area)
            min_edge_length_res = GuiResult(
                0, header='Min Edge Length', title='Min Edge Length',
                location='centroid', scalar=min_edge_length)
            min_theta_res = GuiResult(
                0, header='Min Interior Angle', title='Min Interior Angle',
                location='centroid', scalar=np.degrees(min_interior_angle))
            max_theta_res = GuiResult(
                0, header='Max Interior Angle', title='Max Interior Angle',
                location='centroid', scalar=np.degrees(max_interior_angle))
            dideal_theta_res = GuiResult(
                0, header='Delta Ideal Angle', title='Delta Ideal Angle',
                location='centroid', scalar=np.degrees(dideal_theta))
            skew = np.degrees(max_skew_angle)
            skew_res = GuiResult(
                0, header='Max Skew Angle', title='MaxSkewAngle',
                location='centroid', scalar=skew)
            aspect_res = GuiResult(
                0, header='Aspect Ratio', title='AspectRatio',
                location='centroid', scalar=max_aspect_ratio)
            form_checks = []
            form0.append(('Element Checks', None, form_checks))
            if is_element_dim:
                form_checks.append(('ElementDim', icase, []))
            # the 'and 0' keeps this branch permanently disabled
            if self.make_offset_normals_dim and self.make_nnodes_result and 0:  # pragma: no cover
                nnodes_res = GuiResult(
                    0, header='NNodes/Elem', title='NNodes/Elem',
                    location='centroid', scalar=nnodes_array)
                form_checks.append(('NNodes', icase + 1, []))
                cases[icase + 1] = (nnodes_res, (0, 'NNodes'))
                icase += 1
            # the 'or 1' keeps this branch always taken
            if self.make_offset_normals_dim or 1:
                cases[icase + 1] = (nx_res, (0, 'NormalX'))
                cases[icase + 2] = (ny_res, (0, 'NormalY'))
                cases[icase + 3] = (nz_res, (0, 'NormalZ'))
                cases[icase + 4] = (nxyz_res, (0, 'Normal'))
                form_checks.append(('NormalX', icase + 1, []))
                form_checks.append(('NormalY', icase + 2, []))
                form_checks.append(('NormalZ', icase + 3, []))
                form_checks.append(('Normal', icase + 4, []))
            cases[icase + 5] = (area_res, (0, 'Area'))
            cases[icase + 6] = (min_edge_length_res, (0, 'Min Edge Length'))
            cases[icase + 7] = (min_theta_res, (0, 'Min Interior Angle'))
            cases[icase + 8] = (max_theta_res, (0, 'Max Interior Angle'))
            cases[icase + 9] = (dideal_theta_res, (0, 'Delta Ideal Angle'))
            cases[icase + 10] = (skew_res, (0, 'Max Skew Angle'))
            cases[icase + 11] = (aspect_res, (0, 'Aspect Ratio'))
            form_checks.append(('Area', icase + 5, []))
            form_checks.append(('Min Edge Length', icase + 6, []))
            form_checks.append(('Min Interior Angle', icase + 7, []))
            form_checks.append(('Max Interior Angle', icase + 8, []))
            form_checks.append(('Delta Ideal Angle', icase + 9, []))
            form_checks.append(('Max Skew Angle', icase + 10, []))
            form_checks.append(('Aspect Ratio', icase + 11, []))
            icase += 12
            if np.any(np.isfinite(area_ratio)) and np.nanmax(area_ratio) > 1.:
                arearatio_res = GuiResult(
                    0, header='Area Ratio', title='Area Ratio',
                    location='centroid', scalar=area_ratio)
                cases[icase] = (arearatio_res, (0, 'Area Ratio'))
                form_checks.append(('Area Ratio', icase, []))
                icase += 1
            if np.any(np.isfinite(taper_ratio)) and np.nanmax(taper_ratio) > 1.:
                taperratio_res = GuiResult(
                    0, header='Taper Ratio', title='Taper Ratio',
                    location='centroid', scalar=taper_ratio)
                cases[icase] = (taperratio_res, (0, 'Taper Ratio'))
                form_checks.append(('Taper Ratio', icase, []))
                icase += 1
            if isfinite_and_nonzero(max_warp_angle):
                warp_res = GuiResult(
                    0, header='Max Warp Angle', title='MaxWarpAngle',
                    location='centroid', scalar=np.degrees(max_warp_angle))
                # NOTE(review): result stored at icase + 4 but the form entry
                # references icase — likely should be cases[icase]; confirm
                cases[icase + 4] = (warp_res, (0, 'Max Warp Angle'))
                form_checks.append(('Max Warp Angle', icase, []))
                icase += 1
            #if (np.abs(xoffset).max() > 0.0 or np.abs(yoffset).max() > 0.0 or
                #np.abs(zoffset).max() > 0.0):
            # offsets
            #offset_res = GuiResult(
                #0, header='Offset', title='Offset',
                #location='centroid', scalar=offset, data_format='%g')
            #offset_x_res = GuiResult(
                #0, header='OffsetX', title='OffsetX',
                #location='centroid', scalar=xoffset, data_format='%g')
            #offset_y_res = GuiResult(
                #0, header='OffsetY', title='OffsetY',
                #location='centroid', scalar=yoffset, data_format='%g')
            #offset_z_res = GuiResult(
                #0, header='OffsetZ', title='OffsetZ',
                #location='centroid', scalar=zoffset, data_format='%g')
            #cases[icase] = (offset_res, (0, 'Offset'))
            #cases[icase + 1] = (offset_x_res, (0, 'OffsetX'))
            #cases[icase + 2] = (offset_y_res, (0, 'OffsetY'))
            #cases[icase + 3] = (offset_z_res, (0, 'OffsetZ'))
            #form_checks.append(('Offset', icase, []))
            #form_checks.append(('OffsetX', icase + 1, []))
            #form_checks.append(('OffsetY', icase + 2, []))
            #form_checks.append(('OffsetZ', icase + 3, []))
            #icase += 4
            if self.make_xyz or IS_TESTING:
                x_res = GuiResult(
                    0, header='X', title='X',
                    location='node', scalar=xyz_cid0[:, 0], data_format='%g')
                y_res = GuiResult(
                    0, header='Y', title='Y',
                    location='node', scalar=xyz_cid0[:, 1], data_format='%g')
                z_res = GuiResult(
                    0, header='Z', title='Z',
                    location='node', scalar=xyz_cid0[:, 2], data_format='%g')
                cases[icase] = (x_res, (0, 'X'))
                cases[icase + 1] = (y_res, (0, 'Y'))
                cases[icase + 2] = (z_res, (0, 'Z'))
                form_checks.append(('X', icase + 0, []))
                form_checks.append(('Y', icase + 1, []))
                form_checks.append(('Z', icase + 2, []))
                icase += 3
        elif is_solid:
            # only solid elements
            form_checks = []
            form0.append(('Element Checks', None, form_checks))
            min_edge_length_res = GuiResult(
                0, header='Min Edge Length', title='Min Edge Length',
                location='centroid', scalar=min_edge_length)
            min_theta_res = GuiResult(
                0, header='Min Interior Angle', title='Min Interior Angle',
                location='centroid', scalar=np.degrees(min_interior_angle))
            max_theta_res = GuiResult(
                0, header='Max Interior Angle', title='Max Interior Angle',
                location='centroid', scalar=np.degrees(max_interior_angle))
            # skew is currently unused; skew_res below is commented out
            skew = 90. - np.degrees(max_skew_angle)
            #skew_res = GuiResult(0, header='Max Skew Angle', title='MaxSkewAngle',
                                 #location='centroid', scalar=skew)
            if is_element_dim:
                form_checks.append(('ElementDim', icase, []))
            form_checks.append(('Min Edge Length', icase + 1, []))
            form_checks.append(('Min Interior Angle', icase + 2, []))
            form_checks.append(('Max Interior Angle', icase + 3, []))
            form_checks.append(('Max Skew Angle', icase + 4, []))
            cases[icase + 1] = (min_edge_length_res, (0, 'Min Edge Length'))
            cases[icase + 2] = (min_theta_res, (0, 'Min Interior Angle'))
            cases[icase + 3] = (max_theta_res, (0, 'Max Interior Angle'))
            #cases[icase + 4] = (skew_res, (0, 'Max Skew Angle'))
            icase += 4
        else:
            form0.append(('ElementDim', icase, []))
            icase += 1
        if isgreater_int(mcid_array, -1):
            material_coord_res = GuiResult(
                0, header='MaterialCoord', title='MaterialCoord',
                location='centroid',
                scalar=mcid_array, mask_value=-1, data_format='%i')
            cases[icase] = (material_coord_res, (0, 'MaterialCoord'))
            form0.append(('MaterialCoord', icase, []))
            icase += 1
        if isfinite(material_theta_array):
            material_theta_res = GuiResult(
                0, header='MaterialTheta', title='MaterialTheta',
                location='centroid',
                scalar=material_theta_array, data_format='%.3f')
            cases[icase] = (material_theta_res, (0, 'MaterialTheta'))
            form0.append(('MaterialTheta', icase, []))
            icase += 1
        #print(normals)
        #----------------------------------------------------------
        # finishing up vtk
        if nelements and isfinite(min_edge_length):
            mean_edge_length = np.nanmean(min_edge_length)
            self.set_glyph_scale_factor(mean_edge_length * 2.5)  # was 1.5
        grid.Modified()
        #----------------------------------------------------------
        # finishing up parameters
        self.node_ids = all_nids
        self.normals = normals
        return nid_to_pid_map, icase, cases, form
def map_elements(self, xyz_cid0, nid_cp_cd, nid_map, model, j, dim_max,
                 plot=True, xref_loads=True):
    """
    Creates the elements (vtk cells) and the base "Geometry" result cases.

    Parameters
    ----------
    xyz_cid0 : (nnodes, 3) float ndarray
        the global xyz locations
    nid_cp_cd : (nnodes, 3) int ndarray
        the node_id and coordinate systems corresponding to xyz_cid0
        used for setting the NodeID and CD coordinate results
    nid_map : dict[nid] : nid_index
        nid : int
            the GRID/SPOINT/EPOINT id
        nid_index : int
            the index for the GRID/SPOINT/EPOINT in xyz_cid0
    model : BDF()
        the model object
    j : int
        ???
    dim_max : float
        the max(dx, dy, dz) dimension
        use for ???
    plot : bool; default=True
        unused here; kept for interface compatibility
    xref_loads : bool; default=True
        passed through to _map_elements3 when IS_TESTING

    Returns
    -------
    nid_to_pid_map, icase, cases, form
        the node->property map, the next case id, the result cases dict,
        and the sidebar form tree
    """
    grid = self.gui.grid
    settings = self.gui.settings
    if IS_TESTING:
        # also exercise the newer mapper while testing
        self._map_elements3(nid_map, model, j, dim_max,
                            nid_cp_cd, xref_loads=xref_loads)

    # quality metrics are expensive, so they are optional
    if settings.nastran_is_element_quality:
        out = self._map_elements1_quality(model, xyz_cid0, nid_cp_cd, dim_max, nid_map, j)
    else:
        out = self._map_elements1_no_quality(model, xyz_cid0, nid_cp_cd, dim_max, nid_map, j)
    (nid_to_pid_map, xyz_cid0, superelements, pids, nelements,
     material_coord, material_theta,
     area, min_interior_angle, max_interior_angle, max_aspect_ratio,
     max_skew_angle, taper_ratio, dideal_theta,
     area_ratio, min_edge_length, max_warp_angle) = out
    #self.grid_mapper.SetResolveCoincidentTopologyToPolygonOffset()
    grid.Modified()

    #----------------------------------------------------------
    # build the base "Geometry" results
    cases = OrderedDict()
    self.gui.isubcase_name_map = {1: ['Nastran', '']}
    icase = 0
    form = ['Geometry', None, []]
    form0 = form[2]

    #new_cases = True
    # set to True to enable node_ids as an result
    nids_set = True
    if nids_set and self.gui.nnodes > 0:
        # this intentionally makes a deepcopy
        nids = np.array(nid_cp_cd[:, 0])
        cds = np.array(nid_cp_cd[:, 2])
        nid_res = GuiResult(0, header='NodeID', title='NodeID',
                            location='node', scalar=nids)
        cases[icase] = (nid_res, (0, 'NodeID'))
        form0.append(('NodeID', icase, []))
        icase += 1

        # only add a NodeCd result if the CD frames actually differ
        if len(np.unique(cds)) > 1:
            cd_res = GuiResult(0, header='NodeCd', title='NodeCd',
                               location='node', scalar=cds)
            cases[icase] = (cd_res, (0, 'NodeCd'))
            form0.append(('NodeCd', icase, []))
            icase += 1
        self.node_ids = nids

    # set to True to enable elementIDs as a result
    eids_set = True
    if eids_set and nelements:
        eids = np.zeros(nelements, dtype='int32')
        eid_map = self.gui.eid_map
        for (eid, eid2) in eid_map.items():
            eids[eid2] = eid

        eid_res = GuiResult(0, header='ElementID', title='ElementID',
                            location='centroid', scalar=eids, mask_value=0)
        cases[icase] = (eid_res, (0, 'ElementID'))
        form0.append(('ElementID', icase, []))
        icase += 1
        self.element_ids = eids

    if superelements is not None:
        nid_res = GuiResult(0, header='SuperelementID', title='SuperelementID',
                            location='centroid', scalar=superelements)
        cases[icase] = (nid_res, (0, 'SuperelementID'))
        form0.append(('SuperelementID', icase, []))
        icase += 1

    # subcase_id, resultType, vector_size, location, dataFormat
    if len(model.properties) and nelements and settings.nastran_is_properties:
        icase, upids, pcomp, pshell, is_pshell_pcomp = self._build_properties(
            model, nelements, eids, pids, cases, form0, icase)
        icase = _build_materials(model, pcomp, pshell, is_pshell_pcomp,
                                 cases, form0, icase)
        try:
            icase = _build_optimization(model, pids, upids, nelements,
                                        cases, form0, icase)
        except:
            # NOTE(review): deliberately broad best-effort handler —
            # optimization results are optional; the traceback is logged
            # instead of crashing the GUI (but testing re-raises)
            if IS_TESTING or self.is_testing_flag:
                raise
            s = StringIO()
            traceback.print_exc(file=s)
            sout = s.getvalue()
            self.gui.log_error(sout)
            print(sout)
            #traceback.print_exc(file=sys.stdout)
            #etype, value, tb = sys.exc_info
            #print(etype, value, tb)
            #raise RuntimeError('Optimization Parsing Error') from e
            #traceback.print_tb(e)
            #print(e)

    #print('nelements=%s eid_map=%s' % (nelements, self.eid_map))
    if nelements and isfinite(min_edge_length):
        mean_edge_length = np.nanmean(min_edge_length) * 2.5
        self.gui.set_glyph_scale_factor(mean_edge_length)  # was 1.5

    if (self.make_offset_normals_dim or settings.nastran_is_element_quality) and nelements:
        icase, normals = _build_normals_quality(
            settings, model, self.gui.eid_map, nelements, cases, form0, icase,
            xyz_cid0,
            material_coord, material_theta,
            min_interior_angle, max_interior_angle, dideal_theta,
            area, max_skew_angle, taper_ratio,
            max_warp_angle, area_ratio, min_edge_length, max_aspect_ratio,
            make_offset_normals_dim=self.make_offset_normals_dim)
        self.normals = normals
    return nid_to_pid_map, icase, cases, form
def _build_mcid_vectors(self, model: BDF, nplies: int):
    """creates the shell material coordinate vectors"""
    etype = 3  # vtkLine
    nodes, bars = export_mcids_all(model, eids=None, log=None, debug=False)
    for iply, ply_nodes in nodes.items():
        ply_bars = bars[iply]
        name = 'element coord' if iply == -1 else f'mcid ply={iply+1}'

        nbars = len(ply_bars)
        if nbars == 0:
            # isotropic; no material coordinate to draw for this ply
            continue
        assert nbars > 0, model.card_count

        # one hidden alternate grid per ply
        self.gui.create_alternate_vtk_grid(
            name, color=RED_FLOAT, line_width=3, opacity=1.0,
            representation='surface', is_visible=False, is_pickable=False)
        grid = self.gui.alt_grids[name]
        grid.Allocate(nbars, 1000)

        xyz = np.array(ply_nodes, dtype='float32')
        connectivity = np.array(ply_bars, dtype='int32')
        assert connectivity.min() == 0, connectivity.min()

        points = numpy_to_vtk_points(xyz, points=None, dtype='<f', deep=1)
        grid.SetPoints(points)
        create_vtk_cells_of_constant_element_type(grid, connectivity, etype)
    return
def _build_plotels(self, model):
    """
    Creates the plotel actor.

    Builds an int32 (nplotels, 2) connectivity array from the PLOTEL
    cards and hands it to the 'plotel' alternate grid.  Does nothing
    when the model has no PLOTELs.
    """
    if not model.plotels:
        return
    # sorting these don't matter, but why not?
    lines = np.array(
        [element.node_ids
         for unused_eid, element in sorted(model.plotels.items())],
        dtype='int32')

    self.gui.create_alternate_vtk_grid(
        'plotel', color=RED_FLOAT, line_width=2, opacity=0.8,
        point_size=5, representation='wire', is_visible=True)
    self._add_nastran_lines_to_grid('plotel', lines, model)
def _map_elements1_no_quality(self, model, xyz_cid0, nid_cp_cd, unused_dim_max, nid_map, j):
    """
    Helper for map_elements

    No element quality

    Builds one vtk cell per supported element and fills the per-element
    property-id and material-coordinate arrays.  All quality metrics
    (area, skew, taper, ...) are returned as None on this path.

    Returns
    -------
    out : tuple
        (nid_to_pid_map, xyz_cid0, superelements, pids, nelements,
         material_coord, material_theta,
         area, min_interior_angle, max_interior_angle, max_aspect_ratio,
         max_skew_angle, taper_ratio, dideal_theta,
         area_ratio, min_edge_length, max_warp_angle)
    """
    assert nid_map is not None
    # quality metrics are not computed on this path
    min_interior_angle = None
    max_interior_angle = None
    max_aspect_ratio = None
    max_skew_angle = None
    taper_ratio = None
    dideal_theta = None
    area_ratio = None
    min_edge_length = None
    max_warp_angle = None
    area = None
    if xyz_cid0 is None:
        # no geometry -> return an all-None tuple with the same shape
        superelements = None
        nid_to_pid_map = None
        pids = None
        nelements = None
        material_coord = None
        material_theta = None
        out = (
            nid_to_pid_map, xyz_cid0, superelements, pids, nelements,
            material_coord, material_theta,
            area, min_interior_angle, max_interior_angle, max_aspect_ratio,
            max_skew_angle, taper_ratio, dideal_theta,
            area_ratio, min_edge_length, max_warp_angle,
        )
        return out
    xyz_cid0 = self.xyz_cid0
    nids = nid_cp_cd[:, 0]
    #sphere_size = self._get_sphere_size(dim_max)

    # :param i: the element id in grid
    # :param j: the element id in grid2
    i = 0

    #nids = self.eid_to_nid_map[eid]
    self.eid_to_nid_map = {}

    # the list of all pids
    #pids = []

    # pid = pids_dict[eid]
    pids_dict = {}

    elements, nelements, superelements = get_elements_nelements_unvectorized(model)
    pids = np.zeros(nelements, 'int32')
    material_coord = np.full(nelements, -1, dtype='int32')
    material_theta = np.full(nelements, np.nan, dtype='float32')

    # NOTE: the solid face-connectivity tables (_ctetra_faces, _cpyram_faces,
    # _cpenta_faces, _chexa_faces) are only needed by the quality path;
    # see _map_elements1_quality

    line_type = 3  # vtk.vtkLine().GetCellType()

    nid_to_pid_map = defaultdict(list)
    pid = 0

    log = self.log
    grid = self.gui.grid
    self._build_plotels(model)

    #print("map_elements...")
    eid_to_nid_map = self.eid_to_nid_map
    eid_map = self.gui.eid_map
    for (eid, element) in sorted(elements.items()):
        eid_map[eid] = i
        if i % 5000 == 0 and i > 0:
            print(' map_elements (no quality) = %i' % i)
        etype = element.type
        # if element.Pid() >= 82:
            # continue
        # if element.Pid() in pids_to_drop:
            # continue
        # if element.Pid() not in pids_to_keep:
            # continue
        # if element.pid.type == 'PSOLID':
            # continue

        # pid stays NaN for elements with no property (checked at the
        # bottom of the loop)
        pid = np.nan

        if isinstance(element, (CTRIA3, CTRIAR, CTRAX3, CPLSTN3, CPLSTS3)):
            if isinstance(element, (CTRIA3, CTRIAR)):
                mcid, theta = get_shell_material_coord(element)
                material_coord[i] = mcid
                material_theta[i] = theta
            elem = vtkTriangle()
            node_ids = element.node_ids
            pid = element.Pid()
            eid_to_nid_map[eid] = node_ids
            for nid in node_ids:
                if nid is not None:
                    nid_to_pid_map[nid].append(pid)

            n1, n2, n3 = [nid_map[nid] for nid in node_ids]
            #p1 = xyz_cid0[n1, :]
            #p2 = xyz_cid0[n2, :]
            #p3 = xyz_cid0[n3, :]
            elem.GetPointIds().SetId(0, n1)
            elem.GetPointIds().SetId(1, n2)
            elem.GetPointIds().SetId(2, n3)
            grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
        elif isinstance(element, (CTRIA6, CPLSTN6, CPLSTS6, CTRIAX)):
            # the CTRIAX is a standard 6-noded element
            if isinstance(element, CTRIA6):
                mcid, theta = get_shell_material_coord(element)
                material_coord[i] = mcid
                material_theta[i] = theta
            node_ids = element.node_ids
            pid = element.Pid()
            eid_to_nid_map[eid] = node_ids[:3]
            for nid in node_ids:
                if nid is not None:
                    nid_to_pid_map[nid].append(pid)
            # degrade to a linear triangle when any midside node is missing
            if None not in node_ids:
                elem = vtkQuadraticTriangle()
                elem.GetPointIds().SetId(3, nid_map[node_ids[3]])
                elem.GetPointIds().SetId(4, nid_map[node_ids[4]])
                elem.GetPointIds().SetId(5, nid_map[node_ids[5]])
            else:
                elem = vtkTriangle()

            n1, n2, n3 = [nid_map[nid] for nid in node_ids[:3]]
            #p1 = xyz_cid0[n1, :]
            #p2 = xyz_cid0[n2, :]
            #p3 = xyz_cid0[n3, :]
            elem.GetPointIds().SetId(0, n1)
            elem.GetPointIds().SetId(1, n2)
            elem.GetPointIds().SetId(2, n3)
            grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
        elif isinstance(element, CTRIAX6):
            # the CTRIAX6 is not a standard second-order triangle
            #
            # 5
            # |\
            # |  \
            # 6    4
            # |     \
            # |       \
            # 1----2----3
            #
            #material_coord[i] = element.theta # TODO: no mcid
            # midside nodes are required, nodes out of order
            node_ids = element.node_ids
            pid = element.Pid()
            for nid in node_ids:
                if nid is not None:
                    nid_to_pid_map[nid].append(pid)
            # corner nodes are 0/2/4; midside nodes are 1/3/5
            if None not in node_ids:
                elem = vtkQuadraticTriangle()
                elem.GetPointIds().SetId(3, nid_map[node_ids[1]])
                elem.GetPointIds().SetId(4, nid_map[node_ids[3]])
                elem.GetPointIds().SetId(5, nid_map[node_ids[5]])
            else:
                elem = vtkTriangle()

            n1 = nid_map[node_ids[0]]
            n2 = nid_map[node_ids[2]]
            n3 = nid_map[node_ids[4]]
            #p1 = xyz_cid0[n1, :]
            #p2 = xyz_cid0[n2, :]
            #p3 = xyz_cid0[n3, :]
            elem.GetPointIds().SetId(0, n1)
            elem.GetPointIds().SetId(1, n2)
            elem.GetPointIds().SetId(2, n3)
            eid_to_nid_map[eid] = [node_ids[0], node_ids[2], node_ids[4]]
            grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
        elif isinstance(element, (CQUAD4, CSHEAR, CQUADR, CPLSTN4, CPLSTS4, CQUADX4)):
            if isinstance(element, (CQUAD4, CQUADR)):
                mcid, theta = get_shell_material_coord(element)
                material_coord[i] = mcid
                material_theta[i] = theta
            node_ids = element.node_ids
            pid = element.Pid()
            for nid in node_ids:
                if nid is not None:
                    nid_to_pid_map[nid].append(pid)

            eid_to_nid_map[eid] = node_ids
            try:
                n1, n2, n3, n4 = [nid_map[nid] for nid in node_ids]
            except KeyError:  # pragma: no cover
                print("node_ids =", node_ids)
                print(str(element))
                #print('nid_map = %s' % nid_map)
                raise
                #continue
            #p1 = xyz_cid0[n1, :]
            #p2 = xyz_cid0[n2, :]
            #p3 = xyz_cid0[n3, :]
            #p4 = xyz_cid0[n4, :]

            elem = vtkQuad()
            elem.GetPointIds().SetId(0, n1)
            elem.GetPointIds().SetId(1, n2)
            elem.GetPointIds().SetId(2, n3)
            elem.GetPointIds().SetId(3, n4)
            grid.InsertNextCell(9, elem.GetPointIds())
        elif isinstance(element, (CQUAD8, CPLSTN8, CPLSTS8, CQUADX8)):
            if isinstance(element, CQUAD8):
                mcid, theta = get_shell_material_coord(element)
                material_coord[i] = mcid
                material_theta[i] = theta
            node_ids = element.node_ids
            pid = element.Pid()
            for nid in node_ids:
                if nid is not None:
                    nid_to_pid_map[nid].append(pid)
            self.eid_to_nid_map[eid] = node_ids[:4]

            n1, n2, n3, n4 = [nid_map[nid] for nid in node_ids[:4]]
            #p1 = xyz_cid0[n1, :]
            #p2 = xyz_cid0[n2, :]
            #p3 = xyz_cid0[n3, :]
            #p4 = xyz_cid0[n4, :]
            if None not in node_ids:
                elem = vtkQuadraticQuad()
                elem.GetPointIds().SetId(4, nid_map[node_ids[4]])
                elem.GetPointIds().SetId(5, nid_map[node_ids[5]])
                elem.GetPointIds().SetId(6, nid_map[node_ids[6]])
                elem.GetPointIds().SetId(7, nid_map[node_ids[7]])
            else:
                elem = vtkQuad()
            elem.GetPointIds().SetId(0, n1)
            elem.GetPointIds().SetId(1, n2)
            elem.GetPointIds().SetId(2, n3)
            elem.GetPointIds().SetId(3, n4)
            grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
        elif isinstance(element, (CQUAD, CQUADX)):
            # CQUAD, CQUADX are 9 noded quads
            mcid, theta = get_shell_material_coord(element)
            material_coord[i] = mcid
            material_theta[i] = theta

            node_ids = element.node_ids
            pid = element.Pid()
            for nid in node_ids:
                if nid is not None:
                    nid_to_pid_map[nid].append(pid)
            self.eid_to_nid_map[eid] = node_ids[:4]

            n1, n2, n3, n4 = [nid_map[nid] for nid in node_ids[:4]]
            #p1 = xyz_cid0[n1, :]
            #p2 = xyz_cid0[n2, :]
            #p3 = xyz_cid0[n3, :]
            #p4 = xyz_cid0[n4, :]
            if None not in node_ids:
                elem = vtk.vtkBiQuadraticQuad()
                elem.GetPointIds().SetId(4, nid_map[node_ids[4]])
                elem.GetPointIds().SetId(5, nid_map[node_ids[5]])
                elem.GetPointIds().SetId(6, nid_map[node_ids[6]])
                elem.GetPointIds().SetId(7, nid_map[node_ids[7]])
                elem.GetPointIds().SetId(8, nid_map[node_ids[8]])
            else:
                elem = vtkQuad()
            elem.GetPointIds().SetId(0, n1)
            elem.GetPointIds().SetId(1, n2)
            elem.GetPointIds().SetId(2, n3)
            elem.GetPointIds().SetId(3, n4)
            grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())

        elif isinstance(element, CTETRA4):
            elem = vtkTetra()
            node_ids = element.node_ids
            pid = element.Pid()
            for nid in node_ids:
                nid_to_pid_map[nid].append(pid)
            eid_to_nid_map[eid] = node_ids[:4]
            elem.GetPointIds().SetId(0, nid_map[node_ids[0]])
            elem.GetPointIds().SetId(1, nid_map[node_ids[1]])
            elem.GetPointIds().SetId(2, nid_map[node_ids[2]])
            elem.GetPointIds().SetId(3, nid_map[node_ids[3]])
            grid.InsertNextCell(10, elem.GetPointIds())
            #elem_nid_map = {nid:nid_map[nid] for nid in node_ids[:4]}
        elif isinstance(element, CTETRA10):
            node_ids = element.node_ids
            pid = element.Pid()
            for nid in node_ids:
                if nid is not None:
                    nid_to_pid_map[nid].append(pid)
            eid_to_nid_map[eid] = node_ids[:4]
            if None not in node_ids:
                elem = vtkQuadraticTetra()
                elem.GetPointIds().SetId(4, nid_map[node_ids[4]])
                elem.GetPointIds().SetId(5, nid_map[node_ids[5]])
                elem.GetPointIds().SetId(6, nid_map[node_ids[6]])
                elem.GetPointIds().SetId(7, nid_map[node_ids[7]])
                elem.GetPointIds().SetId(8, nid_map[node_ids[8]])
                elem.GetPointIds().SetId(9, nid_map[node_ids[9]])
            else:
                elem = vtkTetra()
            elem.GetPointIds().SetId(0, nid_map[node_ids[0]])
            elem.GetPointIds().SetId(1, nid_map[node_ids[1]])
            elem.GetPointIds().SetId(2, nid_map[node_ids[2]])
            elem.GetPointIds().SetId(3, nid_map[node_ids[3]])
            grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
        elif isinstance(element, CPENTA6):
            elem = vtkWedge()
            node_ids = element.node_ids
            pid = element.Pid()
            for nid in node_ids:
                nid_to_pid_map[nid].append(pid)
            eid_to_nid_map[eid] = node_ids[:6]
            elem.GetPointIds().SetId(0, nid_map[node_ids[0]])
            elem.GetPointIds().SetId(1, nid_map[node_ids[1]])
            elem.GetPointIds().SetId(2, nid_map[node_ids[2]])
            elem.GetPointIds().SetId(3, nid_map[node_ids[3]])
            elem.GetPointIds().SetId(4, nid_map[node_ids[4]])
            elem.GetPointIds().SetId(5, nid_map[node_ids[5]])
            grid.InsertNextCell(13, elem.GetPointIds())

        elif isinstance(element, CPENTA15):
            node_ids = element.node_ids
            pid = element.Pid()
            for nid in node_ids:
                if nid is not None:
                    nid_to_pid_map[nid].append(pid)
            eid_to_nid_map[eid] = node_ids[:6]
            if None not in node_ids:
                elem = vtkQuadraticWedge()
                elem.GetPointIds().SetId(6, nid_map[node_ids[6]])
                elem.GetPointIds().SetId(7, nid_map[node_ids[7]])
                elem.GetPointIds().SetId(8, nid_map[node_ids[8]])
                elem.GetPointIds().SetId(9, nid_map[node_ids[9]])
                elem.GetPointIds().SetId(10, nid_map[node_ids[10]])
                elem.GetPointIds().SetId(11, nid_map[node_ids[11]])
                elem.GetPointIds().SetId(12, nid_map[node_ids[12]])
                elem.GetPointIds().SetId(13, nid_map[node_ids[13]])
                elem.GetPointIds().SetId(14, nid_map[node_ids[14]])
            else:
                elem = vtkWedge()
            elem.GetPointIds().SetId(0, nid_map[node_ids[0]])
            elem.GetPointIds().SetId(1, nid_map[node_ids[1]])
            elem.GetPointIds().SetId(2, nid_map[node_ids[2]])
            elem.GetPointIds().SetId(3, nid_map[node_ids[3]])
            elem.GetPointIds().SetId(4, nid_map[node_ids[4]])
            elem.GetPointIds().SetId(5, nid_map[node_ids[5]])
            grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
        elif isinstance(element, (CHEXA8, CIHEX1)):
            node_ids = element.node_ids
            pid = element.Pid()
            for nid in node_ids:
                nid_to_pid_map[nid].append(pid)
            eid_to_nid_map[eid] = node_ids[:8]
            elem = vtkHexahedron()
            elem.GetPointIds().SetId(0, nid_map[node_ids[0]])
            elem.GetPointIds().SetId(1, nid_map[node_ids[1]])
            elem.GetPointIds().SetId(2, nid_map[node_ids[2]])
            elem.GetPointIds().SetId(3, nid_map[node_ids[3]])
            elem.GetPointIds().SetId(4, nid_map[node_ids[4]])
            elem.GetPointIds().SetId(5, nid_map[node_ids[5]])
            elem.GetPointIds().SetId(6, nid_map[node_ids[6]])
            elem.GetPointIds().SetId(7, nid_map[node_ids[7]])
            grid.InsertNextCell(12, elem.GetPointIds())
        elif isinstance(element, (CHEXA20, CIHEX2)):
            node_ids = element.node_ids
            pid = element.Pid()
            for nid in node_ids:
                if nid is not None:
                    nid_to_pid_map[nid].append(pid)
            if None not in node_ids:
                elem = vtkQuadraticHexahedron()
                elem.GetPointIds().SetId(8, nid_map[node_ids[8]])
                elem.GetPointIds().SetId(9, nid_map[node_ids[9]])
                elem.GetPointIds().SetId(10, nid_map[node_ids[10]])
                elem.GetPointIds().SetId(11, nid_map[node_ids[11]])

                # these two blocks are flipped
                # (VTK orders midside nodes of the top face before the
                # vertical edges; Nastran is the reverse)
                elem.GetPointIds().SetId(12, nid_map[node_ids[16]])
                elem.GetPointIds().SetId(13, nid_map[node_ids[17]])
                elem.GetPointIds().SetId(14, nid_map[node_ids[18]])
                elem.GetPointIds().SetId(15, nid_map[node_ids[19]])

                elem.GetPointIds().SetId(16, nid_map[node_ids[12]])
                elem.GetPointIds().SetId(17, nid_map[node_ids[13]])
                elem.GetPointIds().SetId(18, nid_map[node_ids[14]])
                elem.GetPointIds().SetId(19, nid_map[node_ids[15]])
            else:
                elem = vtkHexahedron()

            eid_to_nid_map[eid] = node_ids[:8]
            elem.GetPointIds().SetId(0, nid_map[node_ids[0]])
            elem.GetPointIds().SetId(1, nid_map[node_ids[1]])
            elem.GetPointIds().SetId(2, nid_map[node_ids[2]])
            elem.GetPointIds().SetId(3, nid_map[node_ids[3]])
            elem.GetPointIds().SetId(4, nid_map[node_ids[4]])
            elem.GetPointIds().SetId(5, nid_map[node_ids[5]])
            elem.GetPointIds().SetId(6, nid_map[node_ids[6]])
            elem.GetPointIds().SetId(7, nid_map[node_ids[7]])
            grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())

        elif isinstance(element, CPYRAM5):
            node_ids = element.node_ids
            pid = element.Pid()
            for nid in node_ids:
                nid_to_pid_map[nid].append(pid)
            eid_to_nid_map[eid] = node_ids[:5]
            elem = vtkPyramid()
            elem.GetPointIds().SetId(0, nid_map[node_ids[0]])
            elem.GetPointIds().SetId(1, nid_map[node_ids[1]])
            elem.GetPointIds().SetId(2, nid_map[node_ids[2]])
            elem.GetPointIds().SetId(3, nid_map[node_ids[3]])
            elem.GetPointIds().SetId(4, nid_map[node_ids[4]])
            # etype = 14
            grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
        elif isinstance(element, CPYRAM13):
            node_ids = element.node_ids
            pid = element.Pid()
            #if None not in node_ids:
                #print(' node_ids =', node_ids)
                #elem = vtkQuadraticPyramid()
                # etype = 27
                #elem.GetPointIds().SetId(5, nid_map[node_ids[5]])
                #elem.GetPointIds().SetId(6, nid_map[node_ids[6]])
                #elem.GetPointIds().SetId(7, nid_map[node_ids[7]])
                #elem.GetPointIds().SetId(8, nid_map[node_ids[8]])
                #elem.GetPointIds().SetId(9, nid_map[node_ids[9]])
                #elem.GetPointIds().SetId(10, nid_map[node_ids[10]])
                #elem.GetPointIds().SetId(11, nid_map[node_ids[11]])
                #elem.GetPointIds().SetId(12, nid_map[node_ids[12]])
            #else:
            # NOTE(review): quadratic pyramid rendering is disabled; the
            # midside nodes are dropped and a linear pyramid is drawn
            elem = vtkPyramid()
            #print('*node_ids =', node_ids[:5])

            eid_to_nid_map[eid] = node_ids[:5]
            elem.GetPointIds().SetId(0, nid_map[node_ids[0]])
            elem.GetPointIds().SetId(1, nid_map[node_ids[1]])
            elem.GetPointIds().SetId(2, nid_map[node_ids[2]])
            elem.GetPointIds().SetId(3, nid_map[node_ids[3]])
            elem.GetPointIds().SetId(4, nid_map[node_ids[4]])
            grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
        elif etype in {'CBUSH', 'CBUSH1D', 'CFAST',
                       'CELAS1', 'CELAS2', 'CELAS3', 'CELAS4',
                       'CDAMP1', 'CDAMP2', 'CDAMP3', 'CDAMP4', 'CDAMP5',
                       'CVISC', 'CGAP'}:
            # TODO: verify
            # CBUSH, CBUSH1D, CFAST, CELAS1, CELAS3
            # CDAMP1, CDAMP3, CDAMP4, CDAMP5, CVISC
            if hasattr(element, 'pid'):
                pid = element.pid
            else:
                # CELAS2, CELAS4?
                pid = 0
            node_ids = element.node_ids
            for nid in node_ids:
                if nid is not None:
                    nid_to_pid_map[nid].append(pid)

            # NOTE(review): both operands test node_ids[0]; the second
            # was probably meant to be node_ids[1] — TODO confirm
            if node_ids[0] is None and node_ids[0] is None:  # CELAS2
                log.warning('removing CELASx eid=%i -> no node %s' % (eid, node_ids[0]))
                del self.eid_map[eid]
                continue
            if None in node_ids:  # used to be 0...
                if node_ids[0] is None:
                    slot = 1
                elif node_ids[1] is None:
                    slot = 0
                #print('node_ids=%s slot=%s' % (str(node_ids), slot))
                eid_to_nid_map[eid] = node_ids[slot]
                nid = node_ids[slot]
                if nid not in nid_map:
                    # SPOINT
                    log.warning('removing CELASx eid=%i -> SPOINT %i' % (eid, nid))
                    continue

                #c = nid_map[nid]

                #if 1:
                #print(str(element))
                elem = vtk.vtkVertex()
                elem.GetPointIds().SetId(0, j)
                #else:
                #elem = vtk.vtkSphere()
                #elem = vtk.vtkSphereSource()
                #if d == 0.:
                #d = sphere_size
                #elem.SetRadius(sphere_size)
                grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
            else:
                # 2 points
                #d = norm(element.nodes[0].get_position() - element.nodes[1].get_position())
                eid_to_nid_map[eid] = node_ids
                elem = vtk.vtkLine()
                point_ids = elem.GetPointIds()
                try:
                    point_ids.SetId(0, nid_map[node_ids[0]])
                    point_ids.SetId(1, nid_map[node_ids[1]])
                except KeyError:
                    print("node_ids =", node_ids)
                    print(str(element))
                    continue
                grid.InsertNextCell(line_type, point_ids)

        elif etype in ('CBAR', 'CBEAM', 'CROD', 'CONROD', 'CTUBE'):
            if etype == 'CONROD':
                # CONROD has no property card
                pid = 0
                #areai = element.Area()
            else:
                pid = element.Pid()
                #try:
                    #areai = element.pid_ref.Area()
                #except:
                    #print(element)
                    #raise
            node_ids = element.node_ids
            for nid in node_ids:
                nid_to_pid_map[nid].append(pid)

            # 2 points
            n1, n2 = np.searchsorted(nids, element.nodes)
            #xyz1 = xyz_cid0[n1, :]
            #xyz2 = xyz_cid0[n2, :]
            eid_to_nid_map[eid] = node_ids
            elem = vtk.vtkLine()
            try:
                n1, n2 = [nid_map[nid] for nid in node_ids]
            except KeyError:  # pragma: no cover
                print("node_ids =", node_ids)
                print(str(element))
                print('nid_map = %s' % nid_map)
                raise
            point_ids = elem.GetPointIds()
            point_ids.SetId(0, n1)
            point_ids.SetId(1, n2)
            grid.InsertNextCell(line_type, elem.GetPointIds())
        elif etype == 'CBEND':
            pid = element.Pid()
            node_ids = element.node_ids
            for nid in node_ids:
                nid_to_pid_map[nid].append(pid)

            # 2 points
            n1, n2 = np.searchsorted(nids, element.nodes)
            #xyz1 = xyz_cid0[n1, :]
            #xyz2 = xyz_cid0[n2, :]
            eid_to_nid_map[eid] = node_ids

            # NOTE(review): dead branch — the quadratic-edge rendering of
            # the bend via g0 is deliberately disabled
            if 0:
                g0 = element.g0  #_vector
                if not isinstance(g0, integer_types):
                    msg = 'CBEND: g0 must be an integer; g0=%s x=%s\n%s' % (
                        g0, element.x, element)
                    raise NotImplementedError(msg)
                # only supports g0 as an integer
                elem = vtk.vtkQuadraticEdge()
                elem.GetPointIds().SetId(2, nid_map[g0])
            else:
                elem = vtk.vtkLine()

            elem.GetPointIds().SetId(0, nid_map[node_ids[0]])
            elem.GetPointIds().SetId(1, nid_map[node_ids[1]])
            grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())

        elif etype == 'CHBDYG':
            node_ids = element.node_ids
            pid = 0
            #pid = element.Pid()
            for nid in node_ids:
                if nid is not None:
                    nid_to_pid_map[nid].append(pid)

            if element.surface_type in ['AREA4', 'AREA8']:
                eid_to_nid_map[eid] = node_ids[:4]

                n1, n2, n3, n4 = [nid_map[nid] for nid in node_ids[:4]]
                #p1 = xyz_cid0[n1, :]
                #p2 = xyz_cid0[n2, :]
                #p3 = xyz_cid0[n3, :]
                #p4 = xyz_cid0[n4, :]
                if element.surface_type == 'AREA4' or None in node_ids:
                    elem = vtkQuad()
                else:
                    elem = vtkQuadraticQuad()
                    elem.GetPointIds().SetId(4, nid_map[node_ids[4]])
                    elem.GetPointIds().SetId(5, nid_map[node_ids[5]])
                    elem.GetPointIds().SetId(6, nid_map[node_ids[6]])
                    elem.GetPointIds().SetId(7, nid_map[node_ids[7]])

                elem.GetPointIds().SetId(0, n1)
                elem.GetPointIds().SetId(1, n2)
                elem.GetPointIds().SetId(2, n3)
                elem.GetPointIds().SetId(3, n4)
                grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
            elif element.surface_type in ['AREA3', 'AREA6']:
                eid_to_nid_map[eid] = node_ids[:3]
                if element.Type == 'AREA3' or None in node_ids:
                    elem = vtkTriangle()
                else:
                    elem = vtkQuadraticTriangle()
                    elem.GetPointIds().SetId(3, nid_map[node_ids[3]])
                    elem.GetPointIds().SetId(4, nid_map[node_ids[4]])
                    elem.GetPointIds().SetId(5, nid_map[node_ids[5]])

                n1, n2, n3 = [nid_map[nid] for nid in node_ids[:3]]
                #p1 = xyz_cid0[n1, :]
                #p2 = xyz_cid0[n2, :]
                #p3 = xyz_cid0[n3, :]
                elem.GetPointIds().SetId(0, n1)
                elem.GetPointIds().SetId(1, n2)
                elem.GetPointIds().SetId(2, n3)
                grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
            else:
                #print('removing\n%s' % (element))
                self.log.warning('removing eid=%s; %s' % (eid, element.type))
                del self.eid_map[eid]
                self.gui.log_info("skipping %s" % element.type)
                continue
        #elif etype == 'CBYDYP':
        elif etype == 'CHBDYE':
            eid_solid = element.eid2
            side = element.side
            element_solid = model.elements[eid_solid]

            try:
                mapped_inids = SIDE_MAP[element_solid.type][side]
            except KeyError:  # pragma: no cover
                log.warning('removing\n%s' % (element))
                log.warning('removing eid=%s; %s' % (eid, element.type))
                del self.eid_map[eid]
                self.gui.log_info("skipping %s" % element.type)
                continue

            # SIDE_MAP is 1-based; indices into the solid's nodes are 0-based
            side_inids = [nid - 1 for nid in mapped_inids]
            nodes = element_solid.node_ids

            pid = 0
            unused_nnodes = len(side_inids)
            node_ids = [nodes[inid] for inid in side_inids]
            #inids = np.searchsorted(all_nids, node_ids)

            if len(side_inids) == 3:
                n1, n2, n3 = [nid_map[nid] for nid in node_ids[:3]]
                #p1 = xyz_cid0[n1, :]
                #p2 = xyz_cid0[n2, :]
                #p3 = xyz_cid0[n3, :]
                elem = vtkTriangle()
                elem.GetPointIds().SetId(0, n1)
                elem.GetPointIds().SetId(1, n2)
                elem.GetPointIds().SetId(2, n3)
            elif len(side_inids) == 4:
                n1, n2, n3, n4 = [nid_map[nid] for nid in node_ids[:4]]
                #p1 = xyz_cid0[n1, :]
                #p2 = xyz_cid0[n2, :]
                #p3 = xyz_cid0[n3, :]
                #p4 = xyz_cid0[n4, :]
                elem = vtkQuad()
                elem.GetPointIds().SetId(0, n1)
                elem.GetPointIds().SetId(1, n2)
                elem.GetPointIds().SetId(2, n3)
                elem.GetPointIds().SetId(3, n4)
            else:
                msg = 'element_solid:\n%s' % (str(element_solid))
                msg += 'mapped_inids = %s\n' % mapped_inids
                msg += 'side_inids = %s\n' % side_inids
                msg += 'nodes = %s\n' % nodes
                #msg += 'side_nodes = %s\n' % side_nodes
                raise NotImplementedError(msg)
            grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
        elif etype == 'GENEL':
            node_ids = element.node_ids
            pid = 0
            elem = vtk.vtkLine()
            elem.GetPointIds().SetId(0, nid_map[node_ids[0]])
            elem.GetPointIds().SetId(1, nid_map[node_ids[1]])
            # NOTE(review): this branch never calls grid.InsertNextCell,
            # so the GENEL cell is built but not added while i is still
            # incremented — looks like a bug; TODO confirm against the
            # quality path
        else:
            log.warning('removing\n%s' % (element))
            log.warning('removing eid=%s; %s' % (eid, element.type))
            del self.eid_map[eid]
            self.gui.log_info("skipping %s" % element.type)
            continue
        # what about MPCs, RBE2s (rigid elements)?
        #   are they plotted as elements?
        #   and thus do they need a property?

        if pid is None:
            # CONROD
            #print(element)
            #pids[i] = 0
            #pids_dict[eid] = 0
            pass
        else:
            pids[i] = pid
            pids_dict[eid] = pid
        #print(eid, min_thetai, max_thetai, '\n', element)
        i += 1
    #assert len(self.eid_map) > 0, self.eid_map
    #print('mapped elements')

    # elements may have been dropped above, so recount
    nelements = i
    self.gui.nelements = nelements
    #print('nelements=%s pids=%s' % (nelements, list(pids)))
    pids = pids[:nelements]

    out = (
        nid_to_pid_map, xyz_cid0, superelements, pids, nelements,
        material_coord, material_theta,
        area, min_interior_angle, max_interior_angle, max_aspect_ratio,
        max_skew_angle, taper_ratio, dideal_theta,
        area_ratio, min_edge_length, max_warp_angle,
    )
    return out
def _map_elements1_quality(self, model, xyz_cid0, nid_cp_cd, unused_dim_max, nid_map, j):
"""
Helper for map_elements
element checks
http://www.altairuniversity.com/wp-content/uploads/2012/04/Student_Guide_211-233.pdf
Skew:
Skew in trias is calculated by finding the minimum angle
between the vector from each node to the opposing mid-side
and the vector between the two adjacent mid-sides at each
node of the element. Ninety degrees minus the minimum angle
found is reported.
Skew in quads is calculated by finding the minimum angle
between two lines joining opposite midsides of the element.
Ninety degrees minus the minimum angle found is reported.
Aspect Ratio:
Aspect ratio in two-dimensional elements is calculated by
dividing the maximum length side of an element by the minimum
length side of the element. The aspect ratio check is
performed in the same fashion on all faces of 3D elements.
Warpage:
Warpage in two-dimensional elements is calculated by splitting
a quad into two trias and finding the angle between the two
planes which the trias form. The quad is then split again,
this time using the opposite corners and forming the second
set of trias. The angle between the two planes which the trias
form is then found. The maximum angle found between the planes
is the warpage of the element.
Warpage in three-dimensional elements is performed in the same
fashion on all faces of the element.
Jacobian:
determinant of Jacobian matrix (-1.0 to 1.0; 1.0 is ideal)
2D Checks:
Warp angle:
Warp angle is the out of plane angle
Ideal value = 0 degrees (Acceptable < 100).
Warp angle is not applicable for triangular elements.
It is defined as the angle between the normals to two planes
formed by splitting the quad element along the diagonals.
The maximum angle of the two possible angles is reported as
the warp angle.
Aspect Ratio:
Aspect = maximum element edge length / minimum element edge length
Ideal value = 1 (Acceptable < 5).
Skew:
Ideal value = 0 degrees (Acceptable < 45)
Skew for quadrilateral element = 90
minus the minimum angle between the two lines joining the
opposite mid-sides of the element (alpha).
Skew for triangular element = 90
minus the minimum angle between the lines from each node to
the opposing mid-side and between the two adjacent mid-sides
at each node of the element
Jacobian:
Ideal value = 1.0 (Acceptable > 0.6)
In simple terms, the jacobian is a scale factor arising
because of the transformation of the coordinate system.
Elements are tansformed from the global coordinates to
local coordinates (defined at the centroid of every
element), for faster analysis times.
Distortion:
Ideal value = 1.0 (Acceptable > 0.6)
Distortion is defined as:
d = |Jacobian| * AreaLCS / AreaGCS
LCS - Local Coordinate system
GCS - Global Coordinate system
Stretch:
Ideal value: 1.0 (Acceptable > 0.2)
For quadrilateral elements stretch = Lmin * sqrt(2) / dmax
Stretch for triangular element = R * sqrt(12) / Lmax
Included angles:
Skew is based on the overall shape of the element and it does
not take into account the individual angles of a quadrilateral
or triangular element. Included or interior angle check is
applied for individual angles.
Quad: Ideal value = 90 (Acceptable = 45 < theta <135)
Tria: Ideal value = 60 (Acceptable = 20 < theta < 120)
Taper:
Ideal value = 0 (Acceptable < 0.5)
Taper = sum( | (Ai - Aavg) / Aavg |)
Aavg = (A1 + A2 + A3 + A4) / 4
A1,A2 are one split form of the CQUAD4 and A3,A4 are the quad
split in the other direction.
"""
assert nid_map is not None
if xyz_cid0 is None:
nid_to_pid_map = None
superelements = None
pids = None
nelements = None
material_coord = None
material_theta = None
area = None
min_interior_angle = None
max_interior_angle = None
max_aspect_ratio = None
max_skew_angle = None
taper_ratio = None
dideal_theta = None
area_ratio = None
min_edge_length = None
max_warp_angle = None
out = (
nid_to_pid_map, xyz_cid0, superelements, pids, nelements, material_coord,
area, min_interior_angle, max_interior_angle, max_aspect_ratio,
max_skew_angle, taper_ratio, dideal_theta,
area_ratio, min_edge_length, max_warp_angle,
)
return out
xyz_cid0 = self.xyz_cid0
nids = nid_cp_cd[:, 0]
#sphere_size = self._get_sphere_size(dim_max)
# :param i: the element id in grid
# :param j: the element id in grid2
i = 0
#nids = self.eid_to_nid_map[eid]
self.eid_to_nid_map = {}
# the list of all pids
#pids = []
# pid = pids_dict[eid]
pids_dict = {}
elements, nelements, superelements = get_elements_nelements_unvectorized(model)
pids = np.zeros(nelements, 'int32')
material_coord = np.full(nelements, -1, dtype='int32')
material_theta = np.full(nelements, np.nan, dtype='float32')
min_interior_angle = np.zeros(nelements, 'float32')
max_interior_angle = np.zeros(nelements, 'float32')
dideal_theta = np.zeros(nelements, 'float32')
max_skew_angle = np.zeros(nelements, 'float32')
max_warp_angle = np.zeros(nelements, 'float32')
max_aspect_ratio = np.zeros(nelements, 'float32')
area = np.zeros(nelements, 'float32')
area_ratio = np.zeros(nelements, 'float32')
taper_ratio = np.zeros(nelements, 'float32')
min_edge_length = np.zeros(nelements, 'float32')
# pids_good = []
# pids_to_keep = []
# pids_btm = []
# pids_to_drop = []
# 3
# | \
# | \
# | \
# 1------2
# these normals point inwards
# 4
# / | \
# / | \
# 3-------2
# \ | /
# \ | /
# 1
_ctetra_faces = (
(0, 1, 2), # (1, 2, 3),
(0, 3, 1), # (1, 4, 2),
(0, 3, 2), # (1, 3, 4),
(1, 3, 2), # (2, 4, 3),
)
# these normals point inwards
#
#
#
#
# /4-----3
# / /
# / 5 /
# / \ /
# / \ /
# 1---------2
_cpyram_faces = (
(0, 1, 2, 3), # (1, 2, 3, 4),
(1, 4, 2), # (2, 5, 3),
(2, 4, 3), # (3, 5, 4),
(0, 3, 4), # (1, 4, 5),
(0, 4, 1), # (1, 5, 2),
)
# these normals point inwards
# /6
# / | \
# / | \
# 3\ | \
# | \ /4-----5
# | \/ /
# | / \ /
# | / \ /
# | / \ /
# 1---------2
_cpenta_faces = (
(0, 2, 1), # (1, 3, 2),
(3, 4, 5), # (4, 5, 6),
(0, 1, 4, 3), # (1, 2, 5, 4), # bottom
(1, 2, 5, 4), # (2, 3, 6, 5), # right
(0, 3, 5, 2), # (1, 4, 6, 3), # left
)
# these normals point inwards
# 8----7
# /| /|
# / | / |
# / 5-/--6
# 4-----3 /
# | / | /
# | / | /
# 1-----2
_chexa_faces = (
(4, 5, 6, 7), # (5, 6, 7, 8),
(0, 3, 2, 1), # (1, 4, 3, 2),
(1, 2, 6, 5), # (2, 3, 7, 6),
(2, 3, 7, 6), # (3, 4, 8, 7),
(0, 4, 7, 3), # (1, 5, 8, 4),
(0, 6, 5, 4), # (1, 7, 6, 5),
)
nid_to_pid_map = defaultdict(list)
pid = 0
log = self.log
grid = self.gui.grid
self._build_plotels(model)
#print("map_elements...")
eid_to_nid_map = self.eid_to_nid_map
eid_map = self.gui.eid_map
for (eid, element) in sorted(elements.items()):
eid_map[eid] = i
if i % 5000 == 0 and i > 0:
print(' map_elements = %i' % i)
etype = element.type
# if element.Pid() >= 82:
# continue
# if element.Pid() in pids_to_drop:
# continue
# if element.Pid() not in pids_to_keep:
# continue
# if element.pid.type == 'PSOLID':
# continue
pid = np.nan
dideal_thetai = np.nan
min_thetai = np.nan
max_thetai = np.nan
#max_thetai = np.nan
max_skew = np.nan
#max_warp = np.nan
max_warp = np.nan
aspect_ratio = np.nan
areai = np.nan
area_ratioi = np.nan
taper_ratioi = np.nan
min_edge_lengthi = np.nan
if isinstance(element, (CTRIA3, CTRIAR, CTRAX3, CPLSTN3)):
if isinstance(element, (CTRIA3, CTRIAR)):
mcid, theta = get_shell_material_coord(element)
material_coord[i] = mcid
material_theta[i] = theta
elem = vtkTriangle()
node_ids = element.node_ids
pid = element.Pid()
eid_to_nid_map[eid] = node_ids
for nid in node_ids:
if nid is not None:
nid_to_pid_map[nid].append(pid)
n1, n2, n3 = [nid_map[nid] for nid in node_ids]
p1 = xyz_cid0[n1, :]
p2 = xyz_cid0[n2, :]
p3 = xyz_cid0[n3, :]
out = tri_quality(p1, p2, p3)
(areai, max_skew, aspect_ratio,
min_thetai, max_thetai, dideal_thetai, min_edge_lengthi) = out
elem.GetPointIds().SetId(0, n1)
elem.GetPointIds().SetId(1, n2)
elem.GetPointIds().SetId(2, n3)
grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
elif isinstance(element, (CTRIA6, CPLSTN6, CTRIAX)):
# the CTRIAX is a standard 6-noded element
if isinstance(element, CTRIA6):
mcid, theta = get_shell_material_coord(element)
material_coord[i] = mcid
material_theta[i] = theta
node_ids = element.node_ids
pid = element.Pid()
eid_to_nid_map[eid] = node_ids[:3]
for nid in node_ids:
if nid is not None:
nid_to_pid_map[nid].append(pid)
if None not in node_ids:
elem = vtkQuadraticTriangle()
elem.GetPointIds().SetId(3, nid_map[node_ids[3]])
elem.GetPointIds().SetId(4, nid_map[node_ids[4]])
elem.GetPointIds().SetId(5, nid_map[node_ids[5]])
else:
elem = vtkTriangle()
n1, n2, n3 = [nid_map[nid] for nid in node_ids[:3]]
p1 = xyz_cid0[n1, :]
p2 = xyz_cid0[n2, :]
p3 = xyz_cid0[n3, :]
out = tri_quality(p1, p2, p3)
(areai, max_skew, aspect_ratio,
min_thetai, max_thetai, dideal_thetai, min_edge_lengthi) = out
elem.GetPointIds().SetId(0, n1)
elem.GetPointIds().SetId(1, n2)
elem.GetPointIds().SetId(2, n3)
grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
elif isinstance(element, CTRIAX6):
# the CTRIAX6 is not a standard second-order triangle
#
# 5
# |\
# | \
# 6 4
# | \
# | \
# 1----2----3
#
#material_coord[i] = element.theta # TODO: no mcid
# midside nodes are required, nodes out of order
node_ids = element.node_ids
pid = element.Pid()
for nid in node_ids:
if nid is not None:
nid_to_pid_map[nid].append(pid)
if None not in node_ids:
elem = vtkQuadraticTriangle()
elem.GetPointIds().SetId(3, nid_map[node_ids[1]])
elem.GetPointIds().SetId(4, nid_map[node_ids[3]])
elem.GetPointIds().SetId(5, nid_map[node_ids[5]])
else:
elem = vtkTriangle()
n1 = nid_map[node_ids[0]]
n2 = nid_map[node_ids[2]]
n3 = nid_map[node_ids[4]]
p1 = xyz_cid0[n1, :]
p2 = xyz_cid0[n2, :]
p3 = xyz_cid0[n3, :]
out = tri_quality(p1, p2, p3)
(areai, max_skew, aspect_ratio,
min_thetai, max_thetai, dideal_thetai, min_edge_lengthi) = out
elem.GetPointIds().SetId(0, n1)
elem.GetPointIds().SetId(1, n2)
elem.GetPointIds().SetId(2, n3)
eid_to_nid_map[eid] = [node_ids[0], node_ids[2], node_ids[4]]
grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
elif isinstance(element, (CQUAD4, CSHEAR, CQUADR, CPLSTN4, CQUADX4)):
if isinstance(element, (CQUAD4, CQUADR)):
mcid, theta = get_shell_material_coord(element)
material_coord[i] = mcid
material_theta[i] = theta
node_ids = element.node_ids
pid = element.Pid()
for nid in node_ids:
if nid is not None:
nid_to_pid_map[nid].append(pid)
eid_to_nid_map[eid] = node_ids
try:
n1, n2, n3, n4 = [nid_map[nid] for nid in node_ids]
except KeyError: # pragma: no cover
print("node_ids =", node_ids)
print(str(element))
#print('nid_map = %s' % nid_map)
raise
#continue
p1 = xyz_cid0[n1, :]
p2 = xyz_cid0[n2, :]
p3 = xyz_cid0[n3, :]
p4 = xyz_cid0[n4, :]
out = quad_quality(element, p1, p2, p3, p4)
(areai, taper_ratioi, area_ratioi, max_skew, aspect_ratio,
min_thetai, max_thetai, dideal_thetai, min_edge_lengthi, max_warp) = out
elem = vtkQuad()
elem.GetPointIds().SetId(0, n1)
elem.GetPointIds().SetId(1, n2)
elem.GetPointIds().SetId(2, n3)
elem.GetPointIds().SetId(3, n4)
grid.InsertNextCell(9, elem.GetPointIds())
elif isinstance(element, (CQUAD8, CPLSTN8, CQUADX8)):
if isinstance(element, CQUAD8):
mcid, theta = get_shell_material_coord(element)
material_coord[i] = mcid
material_theta[i] = theta
node_ids = element.node_ids
pid = element.Pid()
for nid in node_ids:
if nid is not None:
nid_to_pid_map[nid].append(pid)
self.eid_to_nid_map[eid] = node_ids[:4]
n1, n2, n3, n4 = [nid_map[nid] for nid in node_ids[:4]]
p1 = xyz_cid0[n1, :]
p2 = xyz_cid0[n2, :]
p3 = xyz_cid0[n3, :]
p4 = xyz_cid0[n4, :]
out = quad_quality(element, p1, p2, p3, p4)
(areai, taper_ratioi, area_ratioi, max_skew, aspect_ratio,
min_thetai, max_thetai, dideal_thetai, min_edge_lengthi, max_warp) = out
if None not in node_ids:
elem = vtkQuadraticQuad()
elem.GetPointIds().SetId(4, nid_map[node_ids[4]])
elem.GetPointIds().SetId(5, nid_map[node_ids[5]])
elem.GetPointIds().SetId(6, nid_map[node_ids[6]])
elem.GetPointIds().SetId(7, nid_map[node_ids[7]])
else:
elem = vtkQuad()
elem.GetPointIds().SetId(0, n1)
elem.GetPointIds().SetId(1, n2)
elem.GetPointIds().SetId(2, n3)
elem.GetPointIds().SetId(3, n4)
grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
elif isinstance(element, (CQUAD, CQUADX)):
# CQUAD, CQUADX are 9 noded quads
mcid, theta = get_shell_material_coord(element)
material_coord[i] = mcid
material_theta[i] = theta
node_ids = element.node_ids
pid = element.Pid()
for nid in node_ids:
if nid is not None:
nid_to_pid_map[nid].append(pid)
self.eid_to_nid_map[eid] = node_ids[:4]
n1, n2, n3, n4 = [nid_map[nid] for nid in node_ids[:4]]
p1 = xyz_cid0[n1, :]
p2 = xyz_cid0[n2, :]
p3 = xyz_cid0[n3, :]
p4 = xyz_cid0[n4, :]
out = quad_quality(element, p1, p2, p3, p4)
(areai, taper_ratioi, area_ratioi, max_skew, aspect_ratio,
min_thetai, max_thetai, dideal_thetai, min_edge_lengthi, max_warp) = out
if None not in node_ids:
elem = vtk.vtkBiQuadraticQuad()
elem.GetPointIds().SetId(4, nid_map[node_ids[4]])
elem.GetPointIds().SetId(5, nid_map[node_ids[5]])
elem.GetPointIds().SetId(6, nid_map[node_ids[6]])
elem.GetPointIds().SetId(7, nid_map[node_ids[7]])
elem.GetPointIds().SetId(8, nid_map[node_ids[8]])
else:
elem = vtkQuad()
elem.GetPointIds().SetId(0, n1)
elem.GetPointIds().SetId(1, n2)
elem.GetPointIds().SetId(2, n3)
elem.GetPointIds().SetId(3, n4)
grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
elif isinstance(element, CTETRA4):
elem = vtkTetra()
node_ids = element.node_ids
pid = element.Pid()
for nid in node_ids:
nid_to_pid_map[nid].append(pid)
eid_to_nid_map[eid] = node_ids[:4]
elem.GetPointIds().SetId(0, nid_map[node_ids[0]])
elem.GetPointIds().SetId(1, nid_map[node_ids[1]])
elem.GetPointIds().SetId(2, nid_map[node_ids[2]])
elem.GetPointIds().SetId(3, nid_map[node_ids[3]])
grid.InsertNextCell(10, elem.GetPointIds())
#elem_nid_map = {nid:nid_map[nid] for nid in node_ids[:4]}
min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(
_ctetra_faces, node_ids[:4], nid_map, xyz_cid0)
elif isinstance(element, CTETRA10):
node_ids = element.node_ids
pid = element.Pid()
for nid in node_ids:
if nid is not None:
nid_to_pid_map[nid].append(pid)
eid_to_nid_map[eid] = node_ids[:4]
if None not in node_ids:
elem = vtkQuadraticTetra()
elem.GetPointIds().SetId(4, nid_map[node_ids[4]])
elem.GetPointIds().SetId(5, nid_map[node_ids[5]])
elem.GetPointIds().SetId(6, nid_map[node_ids[6]])
elem.GetPointIds().SetId(7, nid_map[node_ids[7]])
elem.GetPointIds().SetId(8, nid_map[node_ids[8]])
elem.GetPointIds().SetId(9, nid_map[node_ids[9]])
else:
elem = vtkTetra()
elem.GetPointIds().SetId(0, nid_map[node_ids[0]])
elem.GetPointIds().SetId(1, nid_map[node_ids[1]])
elem.GetPointIds().SetId(2, nid_map[node_ids[2]])
elem.GetPointIds().SetId(3, nid_map[node_ids[3]])
grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(
_ctetra_faces, node_ids[:4], nid_map, xyz_cid0)
elif isinstance(element, CPENTA6):
elem = vtkWedge()
node_ids = element.node_ids
pid = element.Pid()
for nid in node_ids:
nid_to_pid_map[nid].append(pid)
eid_to_nid_map[eid] = node_ids[:6]
elem.GetPointIds().SetId(0, nid_map[node_ids[0]])
elem.GetPointIds().SetId(1, nid_map[node_ids[1]])
elem.GetPointIds().SetId(2, nid_map[node_ids[2]])
elem.GetPointIds().SetId(3, nid_map[node_ids[3]])
elem.GetPointIds().SetId(4, nid_map[node_ids[4]])
elem.GetPointIds().SetId(5, nid_map[node_ids[5]])
grid.InsertNextCell(13, elem.GetPointIds())
min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(
_cpenta_faces, node_ids[:6], nid_map, xyz_cid0)
elif isinstance(element, CPENTA15):
node_ids = element.node_ids
pid = element.Pid()
for nid in node_ids:
if nid is not None:
nid_to_pid_map[nid].append(pid)
eid_to_nid_map[eid] = node_ids[:6]
if None not in node_ids:
elem = vtkQuadraticWedge()
elem.GetPointIds().SetId(6, nid_map[node_ids[6]])
elem.GetPointIds().SetId(7, nid_map[node_ids[7]])
elem.GetPointIds().SetId(8, nid_map[node_ids[8]])
elem.GetPointIds().SetId(9, nid_map[node_ids[9]])
elem.GetPointIds().SetId(10, nid_map[node_ids[10]])
elem.GetPointIds().SetId(11, nid_map[node_ids[11]])
elem.GetPointIds().SetId(12, nid_map[node_ids[12]])
elem.GetPointIds().SetId(13, nid_map[node_ids[13]])
elem.GetPointIds().SetId(14, nid_map[node_ids[14]])
else:
elem = vtkWedge()
elem.GetPointIds().SetId(0, nid_map[node_ids[0]])
elem.GetPointIds().SetId(1, nid_map[node_ids[1]])
elem.GetPointIds().SetId(2, nid_map[node_ids[2]])
elem.GetPointIds().SetId(3, nid_map[node_ids[3]])
elem.GetPointIds().SetId(4, nid_map[node_ids[4]])
elem.GetPointIds().SetId(5, nid_map[node_ids[5]])
grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(
_cpenta_faces, node_ids[:6], nid_map, xyz_cid0)
elif isinstance(element, (CHEXA8, CIHEX1)):
node_ids = element.node_ids
pid = element.Pid()
for nid in node_ids:
nid_to_pid_map[nid].append(pid)
eid_to_nid_map[eid] = node_ids[:8]
elem = vtkHexahedron()
elem.GetPointIds().SetId(0, nid_map[node_ids[0]])
elem.GetPointIds().SetId(1, nid_map[node_ids[1]])
elem.GetPointIds().SetId(2, nid_map[node_ids[2]])
elem.GetPointIds().SetId(3, nid_map[node_ids[3]])
elem.GetPointIds().SetId(4, nid_map[node_ids[4]])
elem.GetPointIds().SetId(5, nid_map[node_ids[5]])
elem.GetPointIds().SetId(6, nid_map[node_ids[6]])
elem.GetPointIds().SetId(7, nid_map[node_ids[7]])
grid.InsertNextCell(12, elem.GetPointIds())
min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(
_chexa_faces, node_ids[:8], nid_map, xyz_cid0)
elif isinstance(element, (CHEXA20, CIHEX2)):
node_ids = element.node_ids
pid = element.Pid()
for nid in node_ids:
if nid is not None:
nid_to_pid_map[nid].append(pid)
if None not in node_ids:
elem = vtkQuadraticHexahedron()
elem.GetPointIds().SetId(8, nid_map[node_ids[8]])
elem.GetPointIds().SetId(9, nid_map[node_ids[9]])
elem.GetPointIds().SetId(10, nid_map[node_ids[10]])
elem.GetPointIds().SetId(11, nid_map[node_ids[11]])
# these two blocks are flipped
elem.GetPointIds().SetId(12, nid_map[node_ids[16]])
elem.GetPointIds().SetId(13, nid_map[node_ids[17]])
elem.GetPointIds().SetId(14, nid_map[node_ids[18]])
elem.GetPointIds().SetId(15, nid_map[node_ids[19]])
elem.GetPointIds().SetId(16, nid_map[node_ids[12]])
elem.GetPointIds().SetId(17, nid_map[node_ids[13]])
elem.GetPointIds().SetId(18, nid_map[node_ids[14]])
elem.GetPointIds().SetId(19, nid_map[node_ids[15]])
else:
elem = vtkHexahedron()
eid_to_nid_map[eid] = node_ids[:8]
elem.GetPointIds().SetId(0, nid_map[node_ids[0]])
elem.GetPointIds().SetId(1, nid_map[node_ids[1]])
elem.GetPointIds().SetId(2, nid_map[node_ids[2]])
elem.GetPointIds().SetId(3, nid_map[node_ids[3]])
elem.GetPointIds().SetId(4, nid_map[node_ids[4]])
elem.GetPointIds().SetId(5, nid_map[node_ids[5]])
elem.GetPointIds().SetId(6, nid_map[node_ids[6]])
elem.GetPointIds().SetId(7, nid_map[node_ids[7]])
grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(
_chexa_faces, node_ids[:8], nid_map, xyz_cid0)
elif isinstance(element, CPYRAM5):
node_ids = element.node_ids
pid = element.Pid()
for nid in node_ids:
nid_to_pid_map[nid].append(pid)
eid_to_nid_map[eid] = node_ids[:5]
elem = vtkPyramid()
elem.GetPointIds().SetId(0, nid_map[node_ids[0]])
elem.GetPointIds().SetId(1, nid_map[node_ids[1]])
elem.GetPointIds().SetId(2, nid_map[node_ids[2]])
elem.GetPointIds().SetId(3, nid_map[node_ids[3]])
elem.GetPointIds().SetId(4, nid_map[node_ids[4]])
# etype = 14
grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(
_cpyram_faces, node_ids[:5], nid_map, xyz_cid0)
elif isinstance(element, CPYRAM13):
node_ids = element.node_ids
pid = element.Pid()
#if None not in node_ids:
#print(' node_ids =', node_ids)
#elem = vtkQuadraticPyramid()
# etype = 27
#elem.GetPointIds().SetId(5, nid_map[node_ids[5]])
#elem.GetPointIds().SetId(6, nid_map[node_ids[6]])
#elem.GetPointIds().SetId(7, nid_map[node_ids[7]])
#elem.GetPointIds().SetId(8, nid_map[node_ids[8]])
#elem.GetPointIds().SetId(9, nid_map[node_ids[9]])
#elem.GetPointIds().SetId(10, nid_map[node_ids[10]])
#elem.GetPointIds().SetId(11, nid_map[node_ids[11]])
#elem.GetPointIds().SetId(12, nid_map[node_ids[12]])
#else:
elem = vtkPyramid()
#print('*node_ids =', node_ids[:5])
eid_to_nid_map[eid] = node_ids[:5]
elem.GetPointIds().SetId(0, nid_map[node_ids[0]])
elem.GetPointIds().SetId(1, nid_map[node_ids[1]])
elem.GetPointIds().SetId(2, nid_map[node_ids[2]])
elem.GetPointIds().SetId(3, nid_map[node_ids[3]])
elem.GetPointIds().SetId(4, nid_map[node_ids[4]])
grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(
_cpyram_faces, node_ids[:5], nid_map, xyz_cid0)
elif etype in ('CBUSH', 'CBUSH1D', 'CFAST',
'CELAS1', 'CELAS2', 'CELAS3', 'CELAS4',
'CDAMP1', 'CDAMP2', 'CDAMP3', 'CDAMP4', 'CDAMP5',
'CVISC', 'CGAP'):
# TODO: verify
# CBUSH, CBUSH1D, CFAST, CELAS1, CELAS3
# CDAMP1, CDAMP3, CDAMP4, CDAMP5, CVISC
if hasattr(element, 'pid'):
pid = element.pid
else:
# CELAS2, CELAS4?
pid = 0
node_ids = element.node_ids
for nid in node_ids:
if nid is not None:
nid_to_pid_map[nid].append(pid)
if node_ids[0] is None and node_ids[0] is None: # CELAS2
log.warning('removing CELASx eid=%i -> no node %s' % (eid, node_ids[0]))
del self.eid_map[eid]
continue
if None in node_ids: # used to be 0...
if node_ids[0] is None:
slot = 1
elif node_ids[1] is None:
slot = 0
#print('node_ids=%s slot=%s' % (str(node_ids), slot))
eid_to_nid_map[eid] = node_ids[slot]
nid = node_ids[slot]
if nid not in nid_map:
# SPOINT
log.warning('removing CELASx eid=%i -> SPOINT %i' % (eid, nid))
continue
#c = nid_map[nid]
#if 1:
elem = vtk.vtkVertex()
elem.GetPointIds().SetId(0, j)
#else:
#elem = vtk.vtkSphere()
#elem = vtk.vtkSphereSource()
#if d == 0.:
#d = sphere_size
#elem.SetRadius(sphere_size)
else:
# 2 points
#d = norm(element.nodes[0].get_position() - element.nodes[1].get_position())
eid_to_nid_map[eid] = node_ids
elem = vtk.vtkLine()
try:
elem.GetPointIds().SetId(0, nid_map[node_ids[0]])
elem.GetPointIds().SetId(1, nid_map[node_ids[1]])
except KeyError:
print("node_ids =", node_ids)
print(str(element))
continue
grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
elif etype in ('CBAR', 'CBEAM', 'CROD', 'CONROD', 'CTUBE'):
if etype == 'CONROD':
pid = 0
areai = element.Area()
else:
pid = element.Pid()
try:
areai = element.pid_ref.Area()
except:
print(element)
raise
node_ids = element.node_ids
for nid in node_ids:
nid_to_pid_map[nid].append(pid)
# 2 points
#min_edge_lengthi = norm(element.nodes_ref[0].get_position() -
#element.nodes_ref[1].get_position())
try:
n1, n2 = np.searchsorted(nids, element.nodes)
except:
print(element.get_stats())
n1i, n2i = element.nodes
print('nids =', nids)
assert n1i in nids, 'n1=%s could not be found' % n1i
assert n2i in nids, 'n2=%s could not be found' % n2i
raise
xyz1 = xyz_cid0[n1, :]
xyz2 = xyz_cid0[n2, :]
min_edge_lengthi = norm(xyz2 - xyz1)
eid_to_nid_map[eid] = node_ids
elem = vtk.vtkLine()
try:
n1, n2 = [nid_map[nid] for nid in node_ids]
except KeyError: # pragma: no cover
print("node_ids =", node_ids)
print(str(element))
print('nid_map = %s' % nid_map)
raise
point_ids = elem.GetPointIds()
point_ids.SetId(0, n1)
point_ids.SetId(1, n2)
grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
elif etype == 'CBEND':
pid = element.Pid()
node_ids = element.node_ids
for nid in node_ids:
nid_to_pid_map[nid].append(pid)
# 2 points
n1, n2 = np.searchsorted(nids, element.nodes)
xyz1 = xyz_cid0[n1, :]
xyz2 = xyz_cid0[n2, :]
#min_edge_lengthi = norm(element.nodes_ref[0].get_position() -
#element.nodes_ref[1].get_position())
eid_to_nid_map[eid] = node_ids
g0 = element.g0 #_vector
if not isinstance(g0, integer_types):
msg = 'CBEND: g0 must be an integer; g0=%s x=%s\n%s' % (
g0, element.x, element)
raise NotImplementedError(msg)
# only supports g0 as an integer
elem = vtk.vtkQuadraticEdge()
elem.GetPointIds().SetId(0, nid_map[node_ids[0]])
elem.GetPointIds().SetId(1, nid_map[node_ids[1]])
elem.GetPointIds().SetId(2, nid_map[g0])
grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
elif etype == 'CHBDYG':
node_ids = element.node_ids
pid = 0
#pid = element.Pid()
for nid in node_ids:
if nid is not None:
nid_to_pid_map[nid].append(pid)
if element.surface_type in ('AREA4', 'AREA8'):
eid_to_nid_map[eid] = node_ids[:4]
n1, n2, n3, n4 = [nid_map[nid] for nid in node_ids[:4]]
p1 = xyz_cid0[n1, :]
p2 = xyz_cid0[n2, :]
p3 = xyz_cid0[n3, :]
p4 = xyz_cid0[n4, :]
out = quad_quality(element, p1, p2, p3, p4)
(areai, taper_ratioi, area_ratioi, max_skew, aspect_ratio,
min_thetai, max_thetai, dideal_thetai, min_edge_lengthi, max_warp) = out
if element.surface_type == 'AREA4' or None in node_ids:
elem = vtkQuad()
else:
elem = vtkQuadraticQuad()
elem.GetPointIds().SetId(4, nid_map[node_ids[4]])
elem.GetPointIds().SetId(5, nid_map[node_ids[5]])
elem.GetPointIds().SetId(6, nid_map[node_ids[6]])
elem.GetPointIds().SetId(7, nid_map[node_ids[7]])
elem.GetPointIds().SetId(0, n1)
elem.GetPointIds().SetId(1, n2)
elem.GetPointIds().SetId(2, n3)
elem.GetPointIds().SetId(3, n4)
grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
elif element.surface_type in ['AREA3', 'AREA6']:
eid_to_nid_map[eid] = node_ids[:3]
if element.Type == 'AREA3' or None in node_ids:
elem = vtkTriangle()
else:
elem = vtkQuadraticTriangle()
elem.GetPointIds().SetId(3, nid_map[node_ids[3]])
elem.GetPointIds().SetId(4, nid_map[node_ids[4]])
elem.GetPointIds().SetId(5, nid_map[node_ids[5]])
n1, n2, n3 = [nid_map[nid] for nid in node_ids[:3]]
p1 = xyz_cid0[n1, :]
p2 = xyz_cid0[n2, :]
p3 = xyz_cid0[n3, :]
out = tri_quality(p1, p2, p3)
(areai, max_skew, aspect_ratio,
min_thetai, max_thetai, dideal_thetai, min_edge_lengthi) = out
elem.GetPointIds().SetId(0, n1)
elem.GetPointIds().SetId(1, n2)
elem.GetPointIds().SetId(2, n3)
grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
else:
#print('removing\n%s' % (element))
log.warning('removing eid=%s; %s' % (eid, element.type))
del self.eid_map[eid]
self.gui.log_info("skipping %s" % element.type)
continue
elif etype == 'CHBDYP':
#| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
#| CHBDYP | EID | PID | TYPE | IVIEWF | IVIEWB | G1 | G2 | G0 |
#| | RADMIDF | RADMIDB | GMID | CE | E1 | E2 | E3 | |
pid = 0 # element.pid
node_ids = element.node_ids
if element.Type == 'LINE':
n1, n2 = [nid_map[nid] for nid in node_ids[:2]]
p1 = xyz_cid0[n1, :]
p2 = xyz_cid0[n2, :]
elem = vtk.vtkLine()
elem.GetPointIds().SetId(0, n1)
elem.GetPointIds().SetId(1, n2)
else:
msg = 'element_solid:\n%s' % (str(element_solid))
msg += 'mapped_inids = %s\n' % mapped_inids
msg += 'side_inids = %s\n' % side_inids
msg += 'nodes = %s\n' % nodes
#msg += 'side_nodes = %s\n' % side_nodes
raise NotImplementedError(msg)
grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
elif etype == 'CHBDYE':
#| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
#| CHBDYE | EID | EID2 | SIDE | IVIEWF | IVIEWB | RADMIDF | RADMIDB |
eid_solid = element.eid2
side = element.side
element_solid = model.elements[eid_solid]
try:
mapped_inids = SIDE_MAP[element_solid.type][side]
except KeyError: # pragma: no cover
log.warning('removing\n%s' % (element))
log.warning('removing eid=%s; %s' % (eid, element.type))
del self.eid_map[eid]
self.gui.log_info("skipping %s" % element.type)
continue
side_inids = [nid - 1 for nid in mapped_inids]
nodes = element_solid.node_ids
pid = 0
unused_nnodes = len(side_inids)
node_ids = [nodes[inid] for inid in side_inids]
#inids = np.searchsorted(all_nids, node_ids)
#if len(side_inids) == 2:
#n1, n2 = [nid_map[nid] for nid in node_ids[:2]]
#p1 = xyz_cid0[n1, :]
#p2 = xyz_cid0[n2, :]
#elem = vtk.vtkLine()
#elem.GetPointIds().SetId(0, n1)
#elem.GetPointIds().SetId(1, n2)
if len(side_inids) == 3:
n1, n2, n3 = [nid_map[nid] for nid in node_ids[:3]]
p1 = xyz_cid0[n1, :]
p2 = xyz_cid0[n2, :]
p3 = xyz_cid0[n3, :]
out = tri_quality(p1, p2, p3)
(areai, max_skew, aspect_ratio,
min_thetai, max_thetai, dideal_thetai, min_edge_lengthi) = out
elem = vtkTriangle()
elem.GetPointIds().SetId(0, n1)
elem.GetPointIds().SetId(1, n2)
elem.GetPointIds().SetId(2, n3)
elif len(side_inids) == 4:
n1, n2, n3, n4 = [nid_map[nid] for nid in node_ids[:4]]
p1 = xyz_cid0[n1, :]
p2 = xyz_cid0[n2, :]
p3 = xyz_cid0[n3, :]
p4 = xyz_cid0[n4, :]
out = quad_quality(element, p1, p2, p3, p4)
(areai, taper_ratioi, area_ratioi, max_skew, aspect_ratio,
min_thetai, max_thetai, dideal_thetai, min_edge_lengthi, max_warp) = out
elem = vtkQuad()
elem.GetPointIds().SetId(0, n1)
elem.GetPointIds().SetId(1, n2)
elem.GetPointIds().SetId(2, n3)
elem.GetPointIds().SetId(3, n4)
else:
msg = 'element_solid:\n%s' % (str(element_solid))
msg += 'mapped_inids = %s\n' % mapped_inids
msg += 'side_inids = %s\n' % side_inids
msg += 'nodes = %s\n' % nodes
#msg += 'side_nodes = %s\n' % side_nodes
raise NotImplementedError(msg)
grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
elif etype == 'GENEL':
genel_nids = []
if len(element.ul_nodes):
genel_nids.append(element.ul_nodes)
if len(element.ud_nodes):
genel_nids.append(element.ud_nodes)
node_ids = np.unique(np.hstack(genel_nids))
node_ids = node_ids[:2]
del genel_nids
elem = vtk.vtkLine()
try:
n1, n2 = [nid_map[nid] for nid in node_ids]
except KeyError: # pragma: no cover
print("node_ids =", node_ids)
print(str(element))
print('nid_map = %s' % nid_map)
raise
point_ids = elem.GetPointIds()
point_ids.SetId(0, n1)
point_ids.SetId(1, n2)
grid.InsertNextCell(elem.GetCellType(), elem.GetPointIds())
#areai = np.nan
pid = 0
#cell_type = cell_type_line
#inids = np.searchsorted(all_nids, nids)
#p1, p2 = xyz_cid0[inids, :]
#min_edge_lengthi = norm(p2 - p1)
#nnodes = len(nids)
#dim = 1
else:
log.warning('removing\n%s' % (element))
log.warning('removing eid=%s; %s' % (eid, element.type))
del self.eid_map[eid]
self.gui.log_info("skipping %s" % element.type)
continue
# what about MPCs, RBE2s (rigid elements)?
# are they plotted as elements?
# and thus do they need a property?
if pid is None:
# CONROD
#print(element)
#pids[i] = 0
#pids_dict[eid] = 0
pass
else:
pids[i] = pid
pids_dict[eid] = pid
if np.isnan(max_thetai) and etype not in NO_THETA:
print('eid=%s theta=%s...setting to 360. deg' % (eid, max_thetai))
print(element.rstrip())
if isinstance(element.nodes[0], integer_types):
print(' nodes = %s' % element.nodes)
else:
for node in element.nodes:
print(str(node).rstrip())
max_thetai = 2 * np.pi
#print(eid, min_thetai, max_thetai, '\n', element)
min_interior_angle[i] = min_thetai
max_interior_angle[i] = max_thetai
dideal_theta[i] = dideal_thetai
max_skew_angle[i] = max_skew
max_warp_angle[i] = max_warp
max_aspect_ratio[i] = aspect_ratio
area[i] = areai
area_ratio[i] = area_ratioi
taper_ratio[i] = taper_ratioi
min_edge_length[i] = min_edge_lengthi
i += 1
#assert len(self.eid_map) > 0, self.eid_map
#print('mapped elements')
nelements = i
self.gui.nelements = nelements
#print('nelements=%s pids=%s' % (nelements, list(pids)))
pids = pids[:nelements]
out = (
nid_to_pid_map, xyz_cid0, superelements, pids, nelements,
material_coord, material_theta,
area, min_interior_angle, max_interior_angle, max_aspect_ratio,
max_skew_angle, taper_ratio, dideal_theta,
area_ratio, min_edge_length, max_warp_angle,
)
return out
def _build_properties(self, model: BDF, nelements: int, eids, pids,
cases, form0, icase: int) -> int:
"""
creates:
- PropertyID
TODO: CONROD
"""
upids = None
pcomp = None
pshell = None
is_pcomp = False
is_pshell = False
mids_pcomp = None
thickness_pcomp = None
nplies_pcomp = None
pcomp = {
'mids' : mids_pcomp,
'thickness' : thickness_pcomp,
'nplies' : nplies_pcomp,
}
mids = None
thickness = None
pshell = {
'mids' : mids,
'thickness' : thickness,
}
if not isfinite_and_greater_than(pids, 0):
return icase, upids, pcomp, pshell, (is_pshell, is_pcomp)
prop_types_with_mid = (
'PSOLID',
'PROD', 'PTUBE', 'PBAR', 'PBARL', 'PBEAM', 'PBEAML',
'PBEND',
)
prop_types_without_mid = ('PVISC', 'PELAS', 'PBUSH', 'PDAMP', 'PDAMPT')
pid_res = GuiResult(0, header='PropertyID', title='PropertyID',
location='centroid', scalar=pids, mask_value=0)
cases[icase] = (pid_res, (0, 'PropertyID'))
form0.append(('PropertyID', icase, []))
icase += 1
upids = np.unique(pids)
mid_eids_skip = []
#mids_pshell = None
#thickness_pshell = None
if 'PSHELL' in model.card_count:
is_pshell = True
pids_pcomp = model.get_card_ids_by_card_types(['PCOMP', 'PCOMPG'], combine=True)
properties = model.properties
for superelement in model.superelement_models.values():
properties.update(superelement.properties)
if pids_pcomp:
npliesi = 0
pcomp_nplies = 0
for pid in pids_pcomp:
prop = properties[pid]
pcomp_nplies = max(pcomp_nplies, prop.nplies + 1)
npliesi = max(npliesi, pcomp_nplies)
nplies_pcomp = np.zeros(nelements, dtype='int32')
mids_pcomp = np.zeros((nelements, npliesi), dtype='int32')
thickness_pcomp = np.full((nelements, npliesi), np.nan, dtype='float32')
mids_pcomp = np.zeros((nelements, npliesi), dtype='int32')
is_pcomp = True
#rho = np.full((nelements, nplies), np.nan, dtype='float32')
mids = np.zeros((nelements, 4), dtype='int32')
thickness = np.full((nelements, 4), np.nan, dtype='float32')
for pid in upids:
if pid == 0:
print('skipping pid=0')
continue
elif pid < 0:
continue
try:
prop = properties[pid]
except KeyError:
print('skipping pid=%i' % pid)
continue
if prop.type in prop_types_with_mid:
# simple types
i = np.where(pids == pid)[0]
mid = prop.mid_ref.mid
mids[i, 0] = mid
elif prop.type == 'PSHEAR':
i = np.where(pids == pid)[0]
mid = prop.mid_ref.mid
mids[i, 0] = mid
thickness[i, 0] = prop.Thickness()
elif prop.type == 'PSHELL':
i = np.where(pids == pid)[0]
mid1 = prop.Mid1()
mid2 = prop.Mid2()
mid3 = prop.Mid3()
mid4 = prop.Mid4()
mids[i, 0] = mid1 if mid1 is not None else 0
mids[i, 1] = mid2 if mid2 is not None else 0
mids[i, 2] = mid3 if mid3 is not None else 0
mids[i, 3] = mid4 if mid4 is not None else 0
thickness[i, 0] = prop.Thickness()
thickness[i, 1] = prop.twelveIt3
thickness[i, 2] = prop.tst
elif prop.type in ['PCOMP', 'PCOMPG']:
i = np.where(pids == pid)[0]
npliesi = prop.nplies
nplies_pcomp[i] = npliesi
thickness_pcomp[i, 0] = 0.
for iply in range(npliesi):
mids_pcomp[i, iply+1] = prop.Mid(iply)
thickniess_ply = prop.Thickness(iply)
thickness_pcomp[i, iply+1] = thickniess_ply
thickness_pcomp[i, 0] += thickniess_ply
#mids[i, 0] = mids[i, 1]
#elif prop.type == 'PSHEAR': # element has the thickness
#i = np.where(pids == pid)[0]
#mids[i, 0] = prop.Mid()
#thickness[i, 0] = elem.Thickness()
elif prop.type in prop_types_without_mid:
i = np.where(pids == pid)[0]
mid_eids_skip.append(i)
else:
print('material for pid=%s type=%s not considered' % (pid, prop.type))
#print('mids =', mids)
if len(mid_eids_skip):
mid_eids_skip = np.hstack(mid_eids_skip)
if mids.min() == 0:
i = np.where(mids == 0)[0]
diff_ids = np.setdiff1d(i, mid_eids_skip)
#eids_missing_material_id = eids[i]
not_skipped_eids_missing_material_id = eids[diff_ids]
if len(not_skipped_eids_missing_material_id):
print('eids=%s dont have materials' %
not_skipped_eids_missing_material_id)
pcomp = {
'mids' : mids_pcomp,
'thickness' : thickness_pcomp,
'nplies' : nplies_pcomp,
}
pshell = {
'mids' : mids,
'thickness' : thickness,
}
nplies = None
if is_pshell:
nplies = 1
if is_pcomp:
nplies = nplies_pcomp.max()
if self.gui.settings.nastran_is_shell_mcids and nplies is not None:
self._build_mcid_vectors(model, nplies)
return icase, upids, pcomp, pshell, (is_pshell, is_pcomp)
def _plot_pressures(self, model: BDF, cases, form0, icase: int, subcase_id: int) -> int:
"""
pressure act normal to a shell (as opposed to anti-normal to a solid face)
"""
# quit out if we're going to make pressure plots anyways
#if self.plot_applied_loads:
#return icase
# quit out if we don't have pressures
if not any(['PLOAD' in model.card_count, 'PLOAD2' in model.card_count,
'PLOAD4' in model.card_count]):
return icase
subcase = model.subcases[subcase_id]
try:
load_case_id = subcase.get_parameter('LOAD')[0]
except KeyError:
#self.gui.log.warning('LOAD not found in subcase_id=%s' % (subcase_id))
return icase
if load_case_id not in model.loads and load_case_id not in model.load_combinations:
self.gui.log.warning('LOAD=%s not found' % load_case_id)
return icase
is_pressure, pressures = get_pressure_array(
model, load_case_id, eids=self.element_ids, stop_on_failure=False)
if not is_pressure:
return icase
# if there is no applied pressure, don't make a plot
if np.abs(pressures).max():
case_name = 'Pressure'
# print('iload=%s' % iload)
# print(case_name)
pressure_res = GuiResult(
subcase_id, header='Pressure', title='Pressure',
location='centroid', scalar=pressures)
cases[icase] = (pressure_res, (0, 'Pressure'))
form0.append((case_name, icase, []))
icase += 1
return icase
def _plot_applied_loads(self, model, cases, form0, icase, subcase_id,
                        xref_loads=True, colormap='jet'):
    """
    Creates result cases for the applied loads of a subcase.

    Applied loads include:
    ----------------------
     - Centroidal Pressure
     - Fx, Fy, Fz
     - SPCDx, SPCDy, SPCDz, SPCDxyz
     - Temperature(MATERIAL)
     - Temperature(INITIAL)
     - Temperature(LOAD)
     - Temperature(BOTH)

    Parameters
    ----------
    model : BDF
        the geometry model
    cases : dict[int, tuple]
        the GUI result cases; updated in place
    form0 : list
        the sidebar form entries; updated in place
    icase : int
        the next available case id
    subcase_id : int
        the subcase to pull loads from
    xref_loads : bool; default=True
        loads are only extractable if the model was cross-referenced
    colormap : str; default='jet'
        colormap for the vector (force/moment) results

    Returns
    -------
    icase : int
        the next available case id
    """
    if not xref_loads:
        model.log.debug('returning from plot_applied_loads_early')
        return icase

    try:
        out = get_load_arrays(
            model, subcase_id,
            eid_map=self.eid_map, node_ids=self.node_ids,
            normals=self.normals, nid_map=self.nid_map,)
        is_loads, is_temperatures, temperature_data, load_data = out

        if is_loads:
            centroidal_pressures, forces, spcd = load_data
            # skip all-zero pressures; nothing to plot
            if np.abs(centroidal_pressures).max():
                pressure_res = GuiResult(subcase_id, header='Pressure', title='Pressure',
                                         location='centroid', scalar=centroidal_pressures)
                cases[icase] = (pressure_res, (0, 'Pressure'))
                form0.append(('Pressure', icase, []))
                icase += 1

            # skip constant (typically all-zero) force arrays
            if np.abs(forces.max() - forces.min()) > 0.0:
                fxyz = forces[:, :3]
                mxyz = forces[:, 3:]
                fscalar = np.linalg.norm(fxyz, axis=1)
                mscalar = np.linalg.norm(mxyz, axis=1)
                if fscalar.max() > 0:
                    titles = ['Force XYZ']
                    headers = titles
                    assert fxyz.shape[1] == 3, fxyz.shape
                    assert fxyz.shape[0] == len(fscalar)
                    scales = [1.0]

                    force_xyz_res = ForceTableResults(
                        subcase_id, titles, headers, fxyz, fscalar,
                        scales, data_formats=None,
                        nlabels=None, labelsize=None, ncolors=None, colormap=colormap,
                        set_max_min=False, uname='NastranGeometry')
                    force_xyz_res.save_defaults()

                    cases[icase] = (force_xyz_res, (0, 'Force XYZ'))
                    form0.append(('Force XYZ', icase, []))
                    icase += 1

                if mscalar.max() > 0:
                    titles = ['Moment XYZ']
                    headers = titles
                    assert mxyz.shape[1] == 3, mxyz.shape
                    assert mxyz.shape[0] == len(mscalar)
                    scales = [1.0]

                    moment_xyz_res = ForceTableResults(
                        subcase_id, titles, headers, mxyz, mscalar,
                        scales, data_formats=None,
                        nlabels=None, labelsize=None, ncolors=None, colormap=colormap,
                        set_max_min=False, uname='NastranGeometry')
                    moment_xyz_res.save_defaults()

                    cases[icase] = (moment_xyz_res, (0, 'Moment XYZ'))
                    form0.append(('Moment XYZ', icase, []))
                    icase += 1

            # skip constant (typically all-zero) enforced displacements
            if np.abs(spcd.max() - spcd.min()) > 0.0:
                t123 = spcd[:, :3]
                tnorm = norm(t123, axis=1)
                assert len(tnorm) == len(spcd[:, 2]), len(spcd[:, 2])
                assert len(tnorm) == len(self.nid_map)

                # BUG FIX: the SPCDx/y/z results previously plotted
                # forces[:, 0..2]; the headers (and tnorm above) refer to
                # the SPCD enforced-displacement columns
                spcd_x_res = GuiResult(subcase_id, header='SPCDx', title='SPCDx',
                                       location='node', scalar=spcd[:, 0])
                spcd_y_res = GuiResult(subcase_id, header='SPCDy', title='SPCDy',
                                       location='node', scalar=spcd[:, 1])
                spcd_z_res = GuiResult(subcase_id, header='SPCDz', title='SPCDz',
                                       location='node', scalar=spcd[:, 2])
                spcd_xyz_res = GuiResult(subcase_id, header='SPCD XYZ', title='SPCD XYZ',
                                         location='node', scalar=tnorm)

                cases[icase] = (spcd_x_res, (0, 'SPCDx'))
                form0.append(('SPCDx', icase, []))
                icase += 1

                cases[icase] = (spcd_y_res, (0, 'SPCDy'))
                form0.append(('SPCDy', icase, []))
                icase += 1

                cases[icase] = (spcd_z_res, (0, 'SPCDz'))
                form0.append(('SPCDz', icase, []))
                icase += 1

                cases[icase] = (spcd_xyz_res, (0, 'SPCD XYZ'))
                form0.append(('SPCD XYZ', icase, []))
                icase += 1

        if is_temperatures:
            temperature_key, temperatures = temperature_data
            assert len(temperatures) == len(self.nid_map)
            temperature_res = GuiResult(
                subcase_id, header=temperature_key, title=temperature_key,
                location='node', scalar=temperatures)
            cases[icase] = (temperature_res, (0, temperature_key))
            form0.append((temperature_key, icase, []))
            icase += 1
    except KeyError:
        # log the traceback instead of crashing the GUI load
        stringio = StringIO()
        traceback.print_exc(file=stringio)
        sout = stringio.getvalue()
        self.gui.log_error(sout)
        print(sout)
    return icase
def load_nastran_results(self, results_filename):
    """
    Loads the Nastran results into the GUI.

    Parameters
    ----------
    results_filename : str / OP2
        the path to a .op2 / .h5 / .nod results file, or an
        already-loaded OP2 object
    """
    model_name = 'main'
    self.scalar_bar_actor.VisibilityOn()
    self.scalar_bar_actor.Modified()

    log = self.gui.log
    if isinstance(results_filename, str):
        print("trying to read...%s" % results_filename)
        ext = os.path.splitext(results_filename)[1].lower()

        if ext == '.op2':
            op2_filename = results_filename
            model = OP2(log=log, debug=True)
            model.IS_TESTING = False

            if 0:  # pragma: no cover
                model._results.saved = set()
                all_results = model.get_all_results()
                for result in DESIRED_RESULTS:
                    if result in all_results:
                        model._results.saved.add(result)
            model.read_op2(op2_filename, combine=False)

            if not IS_TESTING or self.is_testing_flag:
                log.info(model.get_op2_stats())
            # print(model.get_op2_stats())
        elif ext == '.nod':
            self.gui.load_patran_nod(results_filename)
            self.gui.cycle_results_explicit()  # start at icase=0
            return
        elif ext == '.h5' and IS_H5PY:
            model = OP2(log=log, debug=True)
            hdf5_filename = results_filename
            model.load_hdf5_filename(hdf5_filename, combine=False)
        #elif ext == '.pch':
            #raise NotImplementedError('*.pch is not implemented; filename=%r' % op2_filename)
        #elif ext == '.f06':
            #model = F06(log=log, debug=True)
            #model.set_vectorization(True)
            #model.read_f06(op2_filename)
        else:
            # BUG FIX: this branch previously formatted the message with
            # op2_filename, which is only bound in the '.op2' branch, so a
            # NameError was raised instead of the intended NotImplementedError
            msg = 'extension=%r is not supported; filename=%r' % (ext, results_filename)
            raise NotImplementedError(msg)
    else:
        # BUG FIX: an OP2 object was previously read from the unbound
        # op2_filename name (NameError); results_filename is the model here
        model = results_filename
        op2_filename = results_filename.filename

    if self.save_data:
        self.model_results = model

    #self.isubcase_name_map[self.isubcase] = [Subtitle, Label]

    # transform displacements into global coordinates
    try:
        icd_transform = self.icd_transform
        #transforms = self.transforms
    except AttributeError:
        log.error('Skipping displacment transformation')
    else:
        model.transform_displacements_to_global(
            icd_transform, self.model.coords, xyz_cid0=self.xyz_cid0)

    cases = self.result_cases
    form = self.get_form()
    icase = len(cases)
    # form = self.res_widget.get_form()

    for isubcase, values in model.isubcase_name_map.items():
        if not isinstance(isubcase, integer_types):
            print('isubcase type =', type(isubcase))
            continue
        if isinstance(values, str):
            # eigenvalue???
            label = values
            log.debug('label_str = %r' % label)
            # NOTE(review): subtitle/superelement_adaptivity are not set on
            # this path; the code below assumes the list form -- confirm
        elif isinstance(values, list):
            log.debug(str(values))
            subtitle, superelement_adaptivity, analysis_code, label = values
            del analysis_code
        else:
            log.debug(str(values))
            log.debug(str(type(values)))
            raise RuntimeError(values)

        if superelement_adaptivity:
            subcase_name = '%s: %s' % (subtitle, superelement_adaptivity)
        else:
            subcase_name = subtitle
        self.isubcase_name_map[isubcase] = [subcase_name, label]
        del subtitle, label
    # self.isubcase_name_map = {subcase_id : label for
    #                           in model.isubcase_name_map.items()}

    form = self._fill_op2_output(results_filename, cases, model, form, icase, log)
    self.gui._finish_results_io2(model_name, form, cases)
def _fill_op2_output(self, op2_filename, cases, model, form, icase, log):
    """
    Builds the sidebar form tree from the OP2 results.

    SOL 101 (Static)
    ----------------
    Subcase 1
     - DisplacementXYZ
     - SPCForceX
     - ...
     - Stress
       - oxx
     - Strain

    SOL 103 (modal)
    ---------------
    Subcase 1
     - mode 1; eigr=123.4
       - EigenvectorXYZ
       - Stress
     - mode 2: eigr=156.3
       - EigenvectorXYZ
       - Stress

    SOL 109 (Freq)
    --------------
    Subcase 1
     - freq=123.4
       - DisplacementXYZ
       - Stress

    SOL 105 (Buckling)
    ------------------
    Subcase 1
     - Preload
       - DisplacementXYZ
     - mode 1; eigr=123.4
       - EigenvectorXYZ
       - Stress
    """
    keys = model.get_key_order()
    assert keys is not None, keys
    #print('keys_order =', keys)

    # per-(key, itime) form fragments, merged at the end by _build_sort1_table
    disp_dict = defaultdict(list)
    stress_dict = defaultdict(list)
    strain_dict = defaultdict(list)
    force_dict = defaultdict(list)
    strain_energy_dict = defaultdict(list)
    gpstress_dict = defaultdict(list)

    header_dict = {}
    keys_map = {}
    key_itime = []

    icase, form_optimization = fill_responses(cases, model, icase)
    for key in keys:
        unused_is_data, unused_is_static, unused_is_real, times = _get_times(model, key)
        if times is None:
            # we dynamically created the keys and created extra ones
            continue
        #assert times is not None  # gen22x_modes
        #print('--------------')
        #print('key = %r' % str(key))
        self.stress[key] = StressObject(model, key, self.element_ids, is_stress=True)
        self.strain[key] = StressObject(model, key, self.element_ids, is_stress=False)

        #header_dict[(key, 0)] = '; Static'

        unused_formi = []
        unused_form_time = []

        # ncases_old lets us detect whether any of the fillers added a case
        ncases_old = icase
        icase = self._fill_op2_oug_oqg(cases, model, key, icase,
                                       disp_dict, header_dict, keys_map,
                                       log)
        icase = self._fill_grid_point_forces(cases, model, key, icase,
                                             disp_dict, header_dict, keys_map)
        # stress
        icase = self._fill_op2_centroidal_stress(
            cases, model, times, key, icase,
            stress_dict, header_dict, keys_map)

        # strain
        icase = self._fill_op2_centroidal_strain(
            cases, model, times, key, icase,
            strain_dict, header_dict, keys_map)

        # force
        icase = self._fill_op2_centroidal_force(
            cases, model, times, key, icase,
            force_dict, header_dict, keys_map)

        # strain energy
        icase = self._fill_op2_centroidal_strain_energy(
            cases, model, times, key, icase,
            strain_energy_dict, header_dict, keys_map)

        # grid point stress
        icase = self._fill_op2_gpstress(
            cases, model, times, key, icase,
            gpstress_dict, header_dict, keys_map)

        ncases = icase - ncases_old
        #print('ncases=%s icase=%s' % (ncases, icase))
        #assert ncases > 0, ncases

        if ncases:
            # only keys that produced cases get table rows
            for itime, unused_dt in enumerate(times):
                new_key = (key, itime)
                key_itime.append(new_key)
    # ----------------------------------------------------------------------
    #print('Key,itime:')
    #for key_itimei in key_itime:
        #print('  %s' % str(key_itimei))

    unused_form_out = []
    form_resultsi = form_optimization
    basename = os.path.basename(op2_filename).rstrip()
    form_results = (basename + '-Results', None, form_optimization)

    if len(key_itime) == 0:
        #print('header_dict =', header_dict)
        #print('key_itime =', key_itime)
        if form_optimization:
            form.append(form_results)
        else:
            log.error('No OP2 results were found')
        return form

    form = _build_sort1_table(
        key_itime, keys_map, header_dict,
        form, form_results, form_resultsi,
        disp_dict, stress_dict, strain_dict, force_dict,
        strain_energy_dict, gpstress_dict,
        log)
    return form
def clear_nastran(self):
    """Resets the Nastran-specific model state to an unloaded condition."""
    # fresh dicts so stale element/node lookups from a prior model are dropped
    for map_name in ('eid_map', 'nid_map', 'eid_to_nid_map'):
        setattr(self, map_name, {})
    # the id arrays are rebuilt on the next geometry load
    self.element_ids = None
    self.node_ids = None
def jsonify(comment_lower: str) -> str:
    """Converts an embedded pyNastran comment into a JSON-parseable string.

    e.g. pyNastran: SPOINT={'id':10, 'xyz':[10.,10.,10.]}
         -> '{"id":10, "xyz":[10.,10.,10.]}'

    Everything to the right of the first '=' is returned with single quotes
    converted to double quotes and trailing commas removed.

    BUG FIX: the old implementation appended a comma before every '}'
    (producing e.g. '{"id":10,}'), which json.loads rejects because JSON
    forbids trailing commas; it also split on every '=', truncating any
    value that contained one.
    """
    import re  # local import; keeps the fix self-contained
    # maxsplit=1 so '=' characters inside the dict body are preserved
    rhs = comment_lower.split('=', 1)[1].rstrip()
    jsonable = rhs.replace("'", '"')
    # strip trailing commas before '}' or ']' (illegal in JSON)
    return re.sub(r',\s*([}\]])', r'\1', jsonable)
def _build_sort1_table(key_itime, keys_map, header_dict,
                       form, form_results, form_resultsi,
                       disp_dict, stress_dict, strain_dict, force_dict,
                       strain_energy_dict, gpstress_dict, log):
    """combines the SORT1-based OP2 results into a SORT1 table

    Walks the (key, itime) pairs in order, grouping consecutive entries of
    the same subcase/subtitle/OGS into one subcase node, and nesting the
    per-time form fragments (disp/stress/strain/force/strain-energy/
    grid-point-stress) under per-time headers.

    Returns
    -------
    form : list
        the input form with form_results appended if anything was built
    """
    is_results = False
    form_resultsi_subcase = []
    #for key, value in header_dict.items():
        #print(key, value)
    # (isubcase, analysis_code, sort_method,
    #  count, ogs, superelement_adaptivity_index) = key

    # seed the "previous subcase" trackers from the first key so the
    # group-change test below works on the first iteration
    key_itime0 = key_itime[0]
    key0 = key_itime0[0]
    # (isubcase, analysis_code, sort_method,
    #  count, ogs, superelement_adaptivity_index, pval_step) = key
    subcase_id_old = key0[0]
    count_old = key0[3]
    ogs_old = key0[4]
    subtitle_old = key0[5]
    subtitle_old, label_old, superelement_adaptivity_index_old, unused_pval_step_old = keys_map[key0]
    del label_old
    del superelement_adaptivity_index_old

    # now that we have the data built, we put it in the form
    # in sorted order
    #
    # TODO: consider pval_step
    for key, itime in key_itime:
        # (isubcase, analysis_code, sort_method,
        #  count, ogs, superelement_adaptivity_index, pval_step) = key
        #print('key =', key)
        subcase_id = key[0]
        count = key[3]
        ogs = key[4]
        #print('*ogs =', ogs)
        #subtitle = key[4]
        try:
            subtitle, unused_label, superelement_adaptivity_index, unused_pval_step = keys_map[key]
        except:
            # keys_map is missing this key; set placeholders for debugging
            # and re-raise (the assignments are never observed by callers)
            subcase_id = subcase_id_old
            subtitle = subtitle_old + '?'
            superelement_adaptivity_index = '?'
            raise

        #print('key =', key)
        # the subcase changed -> flush the accumulated rows under a node
        # labeled with the *previous* subcase
        if subcase_id != subcase_id_old or subtitle != subtitle_old or ogs != ogs_old:
            count_str = '' if count == 0 else ' ; opt_count=%s' % count_old
            ogs_str = '' if ogs == 0 else '; OGS=%s' % ogs_old
            subcase_str = 'Subcase %s; %s%s%s%s' % (
                subcase_id_old, subtitle_old, superelement_adaptivity_index, count_str, ogs_str)
            #print(subcase_str)
            res = (
                subcase_str.rstrip('; '),
                None,
                form_resultsi_subcase
            )
            form_resultsi.append(res)
            form_resultsi_subcase = []
            subcase_id_old = subcase_id
            subtitle_old = subtitle
            count_old = count
            ogs_old = ogs

        try:
            header = header_dict[(key, itime)]
        except KeyError:  # this hits for strain energy
            msg = 'Missing (key, itime) in header_dict\n'
            msg += '  key=%s\n' % str(key)
            (subcase, analysis_code, sort_method,
             count, ogs, superelement_adaptivity_index, pval_step) = key
            msg += f'  subcase={subcase}\n'
            msg += f'  analysis_code={analysis_code}\n'
            msg += f'  sort_method={sort_method}\n'
            msg += f'  count={count}\n'
            msg += f'  ogs={ogs}\n'
            msg += f'  superelement_adaptivity_index={superelement_adaptivity_index!r}\n'
            msg += f'  pval_step={pval_step!r}\n'
            msg += '  itime=%s\n' % itime
            msg += '  %s\n' % str((key, itime))
            msg += 'Possible (key, time):\n'
            for keyi in header_dict:
                msg += '  %s\n' % str(keyi)
            #print(msg.rstrip())
            #print('expected = (%s, %r)\n' % (str(key), itime))
            log.error(msg.rstrip() + '\n')
            #self.log.error('expected = (%s, %r)\n' % (str(key), itime))
            continue
            #raise KeyError(msg)
        try:
            header = header.strip()
        except:
            # header is not a string; dump it for debugging, then re-raise
            print('header = %r' % header)
            raise

        form_outi = []
        form_out = (header, None, form_outi)
        disp_formi = disp_dict[(key, itime)]
        stress_formi = stress_dict[(key, itime)]
        strain_formi = strain_dict[(key, itime)]
        force_formi = force_dict[(key, itime)]
        strain_energy_formi = strain_energy_dict[(key, itime)]
        gpstress_formi = gpstress_dict[(key, itime)]
        if disp_formi:
            # displacements go directly under the time header (no sub-node)
            form_outi += disp_formi
            #form_outi.append(('Disp', None, disp_formi))
        if stress_formi:
            form_outi.append(('Stress', None, stress_formi))
            is_results = True
        if strain_formi:
            form_outi.append(('Strain', None, strain_formi))
            is_results = True
        if force_formi:
            form_outi.append(('Force', None, force_formi))
            is_results = True
        if strain_energy_formi:
            form_outi.append(('Strain Energy', None, strain_energy_formi))
            is_results = True
        if gpstress_formi:
            form_outi.append(('Grid Point Stresses', None, gpstress_formi))
            is_results = True
        if form_outi:
            is_results = True
            form_resultsi_subcase.append(form_out)
            #break

    # flush the final subcase group
    # NOTE(review): subcase_id/count/ogs/subtitle/form_out are the values
    # left over from the last loop iteration; this assumes key_itime is
    # non-empty (the caller checks this) -- confirm
    #print("subcase_id = ", subcase_id)
    if subcase_id:
        count_str = '' if count == 0 else ' ; opt_count=%s' % count_old
        ogs_str = '' if ogs == 0 else '; OGS=%s' % ogs_old
        subcase_str = 'Subcase %s; %s%s%s' % (subcase_id, subtitle, count_str, ogs_str)
        #print('*', subcase_str)
        res = (
            subcase_str.strip('; '),
            None,
            form_resultsi_subcase
        )
        form_resultsi.append(res)
        assert len(form_out) > 0, form_out
        form_resultsi_subcase = []

    if is_results:
        form.append(form_results)
        assert len(form_out) > 0, form_out
        #print('formi =', formi)
        #print('form_out =', form_out)
    #print('form_resultsi =', form_resultsi)
    #print('form_results =', form_results)
        #print(form)
    #if len(formi):
        #form.append(form0)
        #print(form)
        #aa
    #print('form', form)
    #print('form_results =', form_results)
    return form
def _build_normals_quality(settings: Settings,
                           model: BDF, eid_map, nelements: int, cases, form0, icase: int,
                           xyz_cid0,
                           material_coord, material_theta,
                           min_interior_angle, max_interior_angle, dideal_theta,
                           area, max_skew_angle, taper_ratio,
                           max_warp_angle, area_ratio, min_edge_length, max_aspect_ratio,
                           make_offset_normals_dim=True,
                           make_xyz=False, make_nnodes_result=False) -> Tuple[int, Any]:
    """
    Creates some nastran specific results

    creates:
     - ElementDim
     - Normal X/Y/Z
     - NNodes/Elem
     - Area
     - Min/Max Interior Angle
     - Skew Angle
     - Taper Ratio
     - Area Ratio
     - MaterialCoord
     - MaterialTheta

    Returns
    -------
    (icase, normals) : (int, np.ndarray or None)
        the next available case id and the element normals (None when
        make_offset_normals_dim=False)
    """
    colormap = settings.colormap
    #ielement = 0
    #nelements = self.element_ids.shape[0]

    normals = None
    offset = None
    xoffset = None
    yoffset = None
    zoffset = None
    element_dim = None
    nnodes_array = None
    if make_offset_normals_dim:
        out = build_offset_normals_dims(model, eid_map, nelements)
        normals, offset, xoffset, yoffset, zoffset, element_dim, nnodes_array = out

    # if not a flat plate
    #if min(nxs) == max(nxs) and min(nxs) != 0.0:
    #is_element_dim = element_dim is not None and np.max(element_dim) != np.min(element_dim)
    is_element_dim = element_dim is not None
    if is_element_dim and isfinite_and_greater_than(element_dim, -1):
        # NOTE: the case is stored at icase but icase is not advanced here;
        # the shell/solid branches below account for this slot
        eid_dim_res = GuiResult(0, header='ElementDim', title='ElementDim',
                                location='centroid', scalar=element_dim, mask_value=-1)
        cases[icase] = (eid_dim_res, (0, 'ElementDim'))

    #is_shell = normals is not None and np.abs(normals).max() > 0.  # NaN -> 2.0
    is_shell = normals is not None and isfinite(normals)  # using NaNs

    # we have to add the 2nd/3rd lines to make sure bars are getting into this check
    is_solid = (
        isfinite_and_nonzero(min_interior_angle) and
        isfinite_and_nonzero(max_interior_angle)
    )

    #print('is_shell=%s is_solid=%s' % (is_shell, is_solid))
    if is_shell:
        if make_offset_normals_dim:
            nx_res = GuiResult(
                0, header='NormalX', title='NormalX',
                location='centroid', scalar=normals[:, 0], data_format='%.2f')
            ny_res = GuiResult(
                0, header='NormalY', title='NormalY',
                location='centroid', scalar=normals[:, 1], data_format='%.2f')
            nz_res = GuiResult(
                0, header='NormalZ', title='NormalZ',
                location='centroid', scalar=normals[:, 2], data_format='%.2f')
            nxyz_res = NormalResult(0, 'Normals', 'Normals',
                                    nlabels=2, labelsize=5, ncolors=2,
                                    colormap=colormap, data_format='%.1f',
                                    uname='NormalResult')

        if settings.nastran_is_element_quality:
            area_res = GuiResult(0, header='Area', title='Area',
                                 location='centroid', scalar=area)
            min_edge_length_res = GuiResult(
                0, header='Min Edge Length', title='Min Edge Length',
                location='centroid', scalar=min_edge_length)

            min_theta_res = GuiResult(
                0, header='Min Interior Angle', title='Min Interior Angle',
                location='centroid', scalar=np.degrees(min_interior_angle))
            max_theta_res = GuiResult(
                0, header='Max Interior Angle', title='Max Interior Angle',
                location='centroid', scalar=np.degrees(max_interior_angle))
            dideal_theta_res = GuiResult(
                0, header='Delta Ideal Angle', title='Delta Ideal Angle',
                location='centroid', scalar=np.degrees(dideal_theta))

            skew = np.degrees(max_skew_angle)
            skew_res = GuiResult(
                0, header='Max Skew Angle', title='MaxSkewAngle',
                location='centroid', scalar=skew)
            aspect_res = GuiResult(
                0, header='Aspect Ratio', title='AspectRatio',
                location='centroid', scalar=max_aspect_ratio)

        form_checks = []
        form0.append(('Element Checks', None, form_checks))
        if is_element_dim:
            form_checks.append(('ElementDim', icase, []))

        if make_offset_normals_dim and make_nnodes_result:
            nnodes_res = GuiResult(
                0, header='NNodes/Elem', title='NNodes/Elem',
                location='centroid', scalar=nnodes_array)
            form_checks.append(('NNodes', icase + 1, []))
            cases[icase + 1] = (nnodes_res, (0, 'NNodes'))
            icase += 1

        if make_offset_normals_dim:
            # 0 is element_dim
            cases[icase + 1] = (nx_res, (0, 'NormalX'))
            cases[icase + 2] = (ny_res, (0, 'NormalY'))
            cases[icase + 3] = (nz_res, (0, 'NormalZ'))
            cases[icase + 4] = (nxyz_res, (0, 'Normal'))

            form_checks.append(('NormalX', icase + 1, []))
            form_checks.append(('NormalY', icase + 2, []))
            form_checks.append(('NormalZ', icase + 3, []))
            form_checks.append(('Normal', icase + 4, []))
            icase += 5

        if settings.nastran_is_element_quality:
            cases[icase] = (area_res, (0, 'Area'))
            cases[icase + 1] = (min_edge_length_res, (0, 'Min Edge Length'))
            cases[icase + 2] = (min_theta_res, (0, 'Min Interior Angle'))
            cases[icase + 3] = (max_theta_res, (0, 'Max Interior Angle'))
            cases[icase + 4] = (dideal_theta_res, (0, 'Delta Ideal Angle'))
            cases[icase + 5] = (skew_res, (0, 'Max Skew Angle'))
            cases[icase + 6] = (aspect_res, (0, 'Aspect Ratio'))

            form_checks.append(('Area', icase, []))
            form_checks.append(('Min Edge Length', icase + 1, []))
            form_checks.append(('Min Interior Angle', icase + 2, []))
            form_checks.append(('Max Interior Angle', icase + 3, []))
            form_checks.append(('Delta Ideal Angle', icase + 4, []))
            form_checks.append(('Max Skew Angle', icase + 5, []))
            form_checks.append(('Aspect Ratio', icase + 6, []))
            icase += 7

            # optional ratios: only shown when finite and actually > 1
            if np.any(np.isfinite(area_ratio)) and np.nanmax(area_ratio) > 1.:
                arearatio_res = GuiResult(
                    0, header='Area Ratio', title='Area Ratio',
                    location='centroid', scalar=area_ratio)
                cases[icase] = (arearatio_res, (0, 'Area Ratio'))
                form_checks.append(('Area Ratio', icase, []))
                icase += 1

            if np.any(np.isfinite(taper_ratio)) and np.nanmax(taper_ratio) > 1.:
                taperratio_res = GuiResult(
                    0, header='Taper Ratio', title='Taper Ratio',
                    location='centroid', scalar=taper_ratio)
                cases[icase] = (taperratio_res, (0, 'Taper Ratio'))
                form_checks.append(('Taper Ratio', icase, []))
                icase += 1

            if isfinite_and_nonzero(max_warp_angle):
                warp_res = GuiResult(
                    0, header='Max Warp Angle', title='MaxWarpAngle',
                    location='centroid', scalar=np.degrees(max_warp_angle))
                cases[icase] = (warp_res, (0, 'Max Warp Angle'))
                form_checks.append(('Max Warp Angle', icase, []))
                icase += 1

        #if (np.abs(xoffset).max() > 0.0 or np.abs(yoffset).max() > 0.0 or
            #np.abs(zoffset).max() > 0.0):
        #if isfinite(max_warp_angle):

        # offsets
        if make_offset_normals_dim:
            offset_res = GuiResult(
                0, header='Offset', title='Offset',
                location='centroid', scalar=offset, data_format='%g')
            offset_x_res = GuiResult(
                0, header='OffsetX', title='OffsetX',
                location='centroid', scalar=xoffset, data_format='%g')
            offset_y_res = GuiResult(
                0, header='OffsetY', title='OffsetY',
                location='centroid', scalar=yoffset, data_format='%g')
            offset_z_res = GuiResult(
                0, header='OffsetZ', title='OffsetZ',
                location='centroid', scalar=zoffset, data_format='%g')

            cases[icase] = (offset_res, (0, 'Offset'))
            cases[icase + 1] = (offset_x_res, (0, 'OffsetX'))
            cases[icase + 2] = (offset_y_res, (0, 'OffsetY'))
            cases[icase + 3] = (offset_z_res, (0, 'OffsetZ'))

            form_checks.append(('Offset', icase, []))
            form_checks.append(('OffsetX', icase + 1, []))
            form_checks.append(('OffsetY', icase + 2, []))
            form_checks.append(('OffsetZ', icase + 3, []))
            icase += 4

            # dead code kept for reference (vector offset result)
            if 0:  # pragma: no cover
                xyz_offset = np.vstack([xoffset, yoffset, zoffset]).T
                titles = ['Offset XYZ']
                headers = titles
                assert xyz_offset.shape[1] == 3, xyz_offset.shape
                assert xyz_offset.shape[0] == len(offset)
                scales = [1.0]
                subcase_id = 0
                #methods = ['magnitude', 'x', 'y', 'z']
                offset_xyz_res = ElementalTableResults(
                    subcase_id, titles, headers, xyz_offset, offset, scales,
                    #methods,
                )
                offset_xyz_res.save_defaults()
                cases[icase] = (offset_z_res, (0, 'OffsetZ'))
                form_checks.append(('OffsetXYZ', icase, []))
                icase += 1

        if make_xyz or IS_TESTING:
            x_res = GuiResult(
                0, header='X', title='X',
                location='node', scalar=xyz_cid0[:, 0], data_format='%g')
            y_res = GuiResult(
                0, header='Y', title='Y',
                location='node', scalar=xyz_cid0[:, 1], data_format='%g')
            z_res = GuiResult(
                0, header='Z', title='Z',
                location='node', scalar=xyz_cid0[:, 2], data_format='%g')
            cases[icase] = (x_res, (0, 'X'))
            cases[icase + 1] = (y_res, (0, 'Y'))
            cases[icase + 2] = (z_res, (0, 'Z'))
            form_checks.append(('X', icase + 0, []))
            form_checks.append(('Y', icase + 1, []))
            form_checks.append(('Z', icase + 2, []))
            icase += 3

    elif is_solid:
        # only solid elements
        form_checks = []
        form0.append(('Element Checks', None, form_checks))
        if is_element_dim:
            form_checks.append(('ElementDim', icase, []))
            icase += 1

        if settings.nastran_is_element_quality:
            min_edge_length_res = GuiResult(
                0, header='Min Edge Length', title='Min Edge Length',
                location='centroid', scalar=min_edge_length)
            min_theta_res = GuiResult(
                0, header='Min Interior Angle', title='Min Interior Angle',
                location='centroid', scalar=np.degrees(min_interior_angle))
            max_theta_res = GuiResult(
                0, header='Max Interior Angle', title='Max Interior Angle',
                location='centroid', scalar=np.degrees(max_interior_angle))
            #skew = 90. - np.degrees(max_skew_angle)
            #skew_res = GuiResult(0, header='Max Skew Angle', title='MaxSkewAngle',
                                 #location='centroid', scalar=skew)

            form_checks.append(('Min Edge Length', icase, []))
            form_checks.append(('Min Interior Angle', icase + 1, []))
            form_checks.append(('Max Interior Angle', icase + 2, []))
            #form_checks.append(('Max Skew Angle', icase + 3, []))
            cases[icase] = (min_edge_length_res, (0, 'Min Edge Length'))
            cases[icase + 1] = (min_theta_res, (0, 'Min Interior Angle'))
            cases[icase + 2] = (max_theta_res, (0, 'Max Interior Angle'))
            #cases[icase + 3] = (skew_res, (0, 'Max Skew Angle'))
            icase += 3

    else:
        # neither shell nor solid; only the ElementDim entry is added
        form0.append(('ElementDim', icase, []))
        icase += 1

    if isgreater_int(material_coord, -1):
        material_coord_res = GuiResult(
            0, header='MaterialCoord', title='MaterialCoord',
            location='centroid',
            scalar=material_coord, mask_value=-1, data_format='%i')
        cases[icase] = (material_coord_res, (0, 'MaterialCoord'))
        form0.append(('MaterialCoord', icase, []))
        icase += 1
    if isfinite(material_theta):
        material_theta_res = GuiResult(
            0, header='MaterialTheta', title='MaterialTheta',
            location='centroid',
            scalar=material_theta, data_format='%.3f')
        cases[icase] = (material_theta_res, (0, 'MaterialTheta'))
        form0.append(('MaterialTheta', icase, []))
        icase += 1
    return icase, normals
def _build_materials(model, pcomp, pshell, is_pshell_pcomp,
                     cases, form0, icase):
    """
    creates:
      - Thickness
      - nPlies (composite only)
      - Material ID
      - E_11
      - E_22
      - E_33
      - Is Isotropic?

    Parameters
    ----------
    pcomp / pshell : dict
        {'mids': (nelements, nlayers) int array or None,
         'thickness': array; pcomp additionally has 'nplies'}
    is_pshell_pcomp : tuple[bool, bool]
        passed through to get_nastran_gui_layer_word for labeling

    Returns
    -------
    icase : int
        the next available case id
    """
    # i=0 -> pshell, i=1 -> pcomp; the tword logic below depends on this order
    for i, pshell_pcompi in enumerate([pshell, pcomp]):
        mids = pshell_pcompi['mids']
        thickness = pshell_pcompi['thickness']
        if 'nplies' in pshell_pcompi:
            nplies = pshell_pcompi['nplies']
            if nplies is not None and nplies.max() > 0:
                nplies_res = GuiResult(0, header='Number of Plies', title='nPlies',
                                       location='centroid', scalar=nplies, mask_value=0)
                cases[icase] = (nplies_res, (0, 'Number of Plies'))
                form0.append(('Number of Plies', icase, []))
                icase += 1

        if mids is None:
            continue
        nlayers = mids.shape[1]
        for ilayer in range(nlayers):
            if len(thickness.shape) == 2:
                thicknessi = thickness[:, ilayer]
            else:
                ## TODO: I think this is used by a non-PSHELL/PCOMP case
                #print('B-shape...i=%s ilayer=%s' % (i, ilayer))
                thicknessi = thickness

            form_layer = []
            #if i == 1 and ilayer == 0:
                #print('thicknessi = ', thicknessi)
            if isfinite_and_nonzero(thicknessi):
                # the pshell "layers" 1..3 are actually 12I/t^3, ts/t and
                # mid4, not thicknesses; label them accordingly
                if i == 1 and ilayer == 0:
                    tword = 'Total Thickness'  # thickness is nan
                elif i == 0 and ilayer == 1:
                    tword = '12/t^3'
                elif i == 0 and ilayer == 2:
                    tword = 'ts/t'
                elif i == 0 and ilayer == 3:
                    tword = 'mid4'
                else:
                    tword = 'Thickness'
                if tword != 'mid4':
                    t_res = GuiResult(0, header=tword, title=tword,
                                      location='centroid', scalar=thicknessi)
                    cases[icase] = (t_res, (0, tword))
                    form_layer.append((tword, icase, []))
                    icase += 1

            midsi = mids[:, ilayer]
            if midsi.max() == 0:
                # no materials on this layer; nothing to plot
                pass
                #if not(i == 1 and ilayer == 0):
                    #print('cant find anything in ilayer=%s' % ilayer)
                #continue
            else:
                imids_masked = midsi == 0
                has_mat8, has_mat11, e11, e22, e33 = get_material_arrays(model, midsi)
                mid_res = GuiResult(0, header='MaterialID', title='MaterialID',
                                    location='centroid', scalar=midsi, mask_value=0)
                cases[icase] = (mid_res, (0, 'MaterialID'))
                form_layer.append(('MaterialID', icase, []))
                icase += 1

                if has_mat11:  # also implicitly has_mat8
                    is_orthotropic = not (np.array_equal(e11, e22) and np.array_equal(e11, e33))
                elif has_mat8:
                    is_orthotropic = not np.array_equal(e11, e22)
                else:
                    is_orthotropic = False

                # np.nanmax(e11) > 0. can fail if e11=[nan, nan]
                e112 = np.fmax.reduce(e11)
                is_e11 = True
                if np.isnan(e112):
                    is_e11 = False

                if is_orthotropic:
                    e11_res = GuiResult(0, header='E_11', title='E_11',
                                        location='centroid', scalar=e11, data_format='%.3e')
                    e22_res = GuiResult(0, header='E_22', title='E_22',
                                        location='centroid', scalar=e22, data_format='%.3e')
                    cases[icase] = (e11_res, (0, 'E_11'))
                    cases[icase + 1] = (e22_res, (0, 'E_22'))
                    form_layer.append(('E_11', icase, []))
                    form_layer.append(('E_22', icase + 1, []))
                    icase += 2

                    # -1 = masked (no material), 1 = isotropic, 0 = orthotropic
                    is_isotropic = np.zeros(len(e11), dtype='int8')
                    is_isotropic[imids_masked] = -1
                    if has_mat11:
                        is_isotropic[(e11 == e22) | (e11 == e33)] = 1
                        e33_res = GuiResult(0, header='E_33', title='E_33',
                                            location='centroid', scalar=e33, data_format='%.3e')
                        cases[icase] = (e33_res, (0, 'E_33'))
                        form_layer.append(('E_33', icase, []))
                        icase += 1
                    else:
                        is_isotropic[e11 == e22] = 1

                    iso_res = GuiResult(
                        0, header='IsIsotropic?', title='IsIsotropic?',
                        location='centroid', scalar=is_isotropic, data_format='%i',
                        mask_value=-1)
                    cases[icase] = (iso_res, (0, 'Is Isotropic?'))
                    form_layer.append(('Is Isotropic?', icase, []))
                    icase += 1
                elif is_e11:
                    # isotropic
                    assert np.nanmax(e11) > 0, np.nanmax(e11)
                    e11_res = GuiResult(0, header='E', title='E',
                                        location='centroid', scalar=e11, data_format='%.3e')
                    cases[icase] = (e11_res, (0, 'E'))
                    form_layer.append(('E', icase, []))
                    icase += 1

            #print('form_layer =', form_layer)
            if form_layer:
                if nlayers == 1:
                    # single-layer: merge directly into the parent form
                    form0 += form_layer
                else:
                    word = get_nastran_gui_layer_word(i, ilayer, is_pshell_pcomp)
                    form0.append((word, None, form_layer))
    return icase
def _build_optimization(model: BDF, pids: np.ndarray, upids: np.ndarray, nelements: int,
                        cases, form0, icase: int) -> int:
    """
    Creates the optimization visualization.  Supports:
      - DVPREL1/2 shell thickness:
        - DV Region
        - DVPREL Init - t
        - DVPREL Min - t
        - DVPREL Max - t

    Returns
    -------
    icase : int
        the next available case id
    """
    # guard clauses: nothing to do without design variables on properties
    if upids is None:
        return icase
    if not (len(model.properties) and len(model.dvprels)):
        return icase

    # len(model.dvprels) + len(model.dvcrels) + len(model.dvmrels) + len(model.desvars)
    dvprel_arrays = model._get_dvprel_ndarrays(nelements, pids)

    optimization_form = []
    for prop_key, (design_region, init_vals, min_vals, max_vals) in dvprel_arrays.items():
        # skip property types that aren't part of any design region
        if np.nanmax(design_region) == 0:
            continue

        region_result = GuiResult(
            0, header='DV Region', title='DV Region',
            location='centroid', scalar=design_region, mask_value=0)
        init_result = GuiResult(
            0, header=f'DVPREL Init - {prop_key}', title=f'DVPREL Init - {prop_key}',
            location='centroid', scalar=init_vals)

        prop_form = []
        cases[icase] = (region_result, (0, 'DV Region'))
        cases[icase + 1] = (init_result, (0, f'DVPREL Init - {prop_key}'))
        prop_form.append(('DV Region', icase, []))
        prop_form.append((f'DVPREL Init - {prop_key}', icase + 1, []))
        icase += 2

        # the min/max bounds are optional; all-NaN means "not defined"
        if np.any(np.isfinite(min_vals)):
            min_result = GuiResult(
                0, header=f'DVPREL Min - {prop_key}', title=f'DVPREL Min - {prop_key}',
                location='centroid', scalar=min_vals)
            cases[icase] = (min_result, (0, f'DVPREL Min - {prop_key}'))
            prop_form.append((f'DVPREL Min - {prop_key}', icase, []))
            icase += 1
        if np.any(np.isfinite(max_vals)):
            max_result = GuiResult(
                0, header=f'DVPREL Max - {prop_key}', title=f'DVPREL Max - {prop_key}',
                location='centroid', scalar=max_vals)
            cases[icase] = (max_result, (0, f'DVPREL Max - {prop_key}'))
            prop_form.append((f'DVPREL Max - {prop_key}', icase, []))
            icase += 1
        optimization_form.append((prop_key, None, prop_form))

    if optimization_form:
        form0.append(('Optimization', None, optimization_form))
    return icase
def build_superelement_model(model: BDF, cid: int=0, fdtype: str='float32'):
    """Collect node ids/locations for the main model and each superelement.

    Parameters
    ----------
    model : BDF
        the main model; may own superelement models
    cid : int; default=0
        the coordinate system the xyz locations are transformed into
    fdtype : str; default='float32'
        float dtype for the xyz arrays

    Returns
    -------
    xyz_cid0 : dict[super_id, (nnodes, 3) float ndarray]
        node locations in coordinate system ``cid`` per (super)model
    nid_cp_cd : dict[super_id, (nnodes, 3) int ndarray]
        [nid, cp, cd] rows per (super)model
    icd_transform : dict[super_id, dict]
        displacement-coordinate transform indices per (super)model
    """
    all_models = {0: model}
    all_models.update(model.superelement_models)

    xyz_cid0 = {}
    nid_cp_cd = {}
    icd_transform = {}
    for super_id, modeli in sorted(all_models.items()):
        (icd_transformi, icp_transformi,
         xyz_cpi, nid_cp_cdi) = modeli.get_displacement_index_xyz_cp_cd(
             fdtype=fdtype, idtype='int32', sort_ids=True)
        xyz_cid0i = modeli.transform_xyzcp_to_xyz_cid(
            xyz_cpi, nid_cp_cdi[:, 0], icp_transformi, cid=cid,
            in_place=False)

        # a superelement (never the main model, super_id=0) may carry an
        # SELOC that repositions it; update its node locations here
        # TODO: when should seloc get applied?
        #       during superelement creation or now?
        #       going with superelement creation...
        if super_id and super_id in model.seloc:
            xyz_cid0i = model.seloc[super_id].transform(model, xyz_cid0i)

        icd_transform[super_id] = icd_transformi
        nid_cp_cd[super_id] = nid_cp_cdi
        xyz_cid0[super_id] = xyz_cid0i
    return xyz_cid0, nid_cp_cd, icd_transform
def get_caero_count(model: BDF) -> Tuple[int, int, int, int]:
    """Count the CAERO panels/points needed to size the aero arrays.

    Returns
    -------
    ncaeros : int
        number of coarse aero elements
    ncaeros_sub : int
        number of points in the sub-panel mesh
    ncaeros_points : int
        number of points in the coarse aero mesh
    ncaero_sub_points : int
        number of elements in the sub-panel mesh
    """
    ncaeros = 0
    ncaeros_sub = 0
    ncaeros_points = 0
    ncaero_sub_points = 0

    # pass 1: size the sub-panel mesh
    # (order doesn't matter because we're only sizing arrays)
    for caero in model.caeros.values():
        if hasattr(caero, 'panel_points_elements'):
            npointsi, ncelementsi = caero.get_npanel_points_elements()
            ncaeros_sub += npointsi
            ncaero_sub_points += ncelementsi
        elif isinstance(caero, (CAERO2, BODY7)):
            pass  # bodies have no sub-panels
        else:  # pragma: no cover
            msg = '%r doesnt support panel_points_elements\n%s' % (
                caero.type, caero.rstrip())
            raise NotImplementedError(msg)

    # pass 2: size the coarse panel mesh (sorted for determinism)
    for unused_eid, caero in sorted(model.caeros.items()):
        if isinstance(caero, (CAERO1, CAERO3, CAERO4, CAERO5, CAERO7)):
            # each panel card is rendered as a single quad
            ncaeros_points += 4
            ncaeros += 1
        elif isinstance(caero, (CAERO2, BODY7)):
            points, elems = caero.get_points_elements_3d()
            if points is None:
                continue
            ncaeros_points += points.shape[0]
            ncaeros += elems.shape[0]
        else:  # pragma: no cover
            msg = '%r doesnt support panel counter\n%s' % (
                caero.type, caero.rstrip())
            raise NotImplementedError(msg)
    return ncaeros, ncaeros_sub, ncaeros_points, ncaero_sub_points
def get_caero_points(model: BDF, box_id_to_caero_element_map: Dict[int, Any]):
    """Stack the sub-panel points for the supported CAERO cards.

    Fills ``box_id_to_caero_element_map`` (box_id -> element connectivity,
    offset into the stacked point array) as a side effect.

    Returns
    -------
    caero_points : (npoints, 3) float ndarray
        stacked panel points; an empty (0, 3) array when nothing was found
    has_caero : bool
        True when at least one panel contributed points
    """
    has_caero = False
    npoints_total = 0
    npanels = 0
    points_list = []
    if model.caeros:
        for unused_eid, caero in sorted(model.caeros.items()):
            card_type = caero.type
            if card_type in ('CAERO1', 'CAERO4', 'CAERO7'):
                nboxes = len(caero.box_ids.ravel())
                if nboxes > 1000:
                    # very fine panels make the gui sluggish
                    print('skipping nboxes=%s for:\n%s' % (nboxes, str(caero)))
                    continue
                npanels += 1
                pointsi, elementsi = caero.panel_points_elements()
                points_list.append(pointsi)
                # map each aero box to its connectivity, offset by the
                # number of points already stacked
                for ibox, box_id in enumerate(caero.box_ids.flat):
                    box_id_to_caero_element_map[box_id] = elementsi[ibox, :] + npoints_total
                npoints_total += pointsi.shape[0]
            elif card_type in ('CAERO2', 'BODY7'):
                pass  # bodies contribute no sub-panels
            else:
                print('caero\n%s' % caero)
    if npanels:
        caero_points = np.vstack(points_list)
        has_caero = True
    else:
        caero_points = np.empty((0, 3))
    return caero_points, has_caero
| [
"pyNastran.gui.gui_objects.gui_result.NormalResult",
"vtk.vtkPoints",
"collections.defaultdict",
"numpy.argsort",
"numpy.arange",
"numpy.degrees",
"vtk.vtkBiQuadraticQuad",
"io.StringIO",
"vtk.vtkLine",
"vtk.vtkTriangle",
"numpy.vstack",
"numpy.nanmax",
"pyNastran.bdf.mesh_utils.delete_bad_e... | [((277333, 277366), 'pyNastran.femutils.nan.isgreater_int', 'isgreater_int', (['material_coord', '(-1)'], {}), '(material_coord, -1)\n', (277346, 277366), False, 'from pyNastran.femutils.nan import isfinite, isfinite_and_greater_than, isfinite_and_nonzero, isgreater_int\n'), ((277714, 277738), 'pyNastran.femutils.nan.isfinite', 'isfinite', (['material_theta'], {}), '(material_theta)\n', (277722, 277738), False, 'from pyNastran.femutils.nan import isfinite, isfinite_and_greater_than, isfinite_and_nonzero, isgreater_int\n'), ((18494, 18518), 'numpy.vstack', 'np.vstack', (['xyz_cid0_full'], {}), '(xyz_cid0_full)\n', (18503, 18518), True, 'import numpy as np\n'), ((18543, 18568), 'numpy.vstack', 'np.vstack', (['nid_cp_cd_full'], {}), '(nid_cp_cd_full)\n', (18552, 18568), True, 'import numpy as np\n'), ((18625, 18644), 'numpy.unique', 'np.unique', (['all_nids'], {}), '(all_nids)\n', (18634, 18644), True, 'import numpy as np\n'), ((32708, 32721), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (32719, 32721), False, 'from collections import defaultdict, OrderedDict\n'), ((42937, 42963), 'pyNastran.gui.utils.vtk.vtk_utils.get_numpy_idtype_for_vtk', 'get_numpy_idtype_for_vtk', ([], {}), '()\n', (42961, 42963), False, 'from pyNastran.gui.utils.vtk.vtk_utils import get_numpy_idtype_for_vtk, numpy_to_vtk_points, create_vtk_cells_of_constant_element_type\n'), ((47460, 47503), 'pyNastran.gui.utils.vtk.base_utils.numpy_to_vtkIdTypeArray', 'numpy_to_vtkIdTypeArray', (['nids_array'], {'deep': '(1)'}), '(nids_array, deep=1)\n', (47483, 47503), False, 'from pyNastran.gui.utils.vtk.base_utils import numpy_to_vtk, numpy_to_vtkIdTypeArray\n'), ((47524, 47542), 'vtk.vtkCellArray', 'vtk.vtkCellArray', ([], {}), '()\n', (47540, 47542), False, 'import vtk\n'), ((47791, 47862), 'pyNastran.gui.utils.vtk.base_utils.numpy_to_vtk', 'numpy_to_vtk', (['cell_offsets_array'], {'deep': 'deep', 'array_type': 'vtk.VTK_ID_TYPE'}), 
'(cell_offsets_array, deep=deep, array_type=vtk.VTK_ID_TYPE)\n', (47803, 47862), False, 'from pyNastran.gui.utils.vtk.base_utils import numpy_to_vtk, numpy_to_vtkIdTypeArray\n'), ((50636, 50665), 'pyNastran.gui.utils.vtk.vtk_utils.numpy_to_vtk_points', 'numpy_to_vtk_points', (['xyz_cid0'], {}), '(xyz_cid0)\n', (50655, 50665), False, 'from pyNastran.gui.utils.vtk.vtk_utils import get_numpy_idtype_for_vtk, numpy_to_vtk_points, create_vtk_cells_of_constant_element_type\n'), ((71560, 71577), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (71571, 71577), False, 'from collections import defaultdict, OrderedDict\n'), ((75225, 75240), 'vtk.vtkPoints', 'vtk.vtkPoints', ([], {}), '()\n', (75238, 75240), False, 'import vtk\n'), ((79124, 79139), 'vtk.vtkPoints', 'vtk.vtkPoints', ([], {}), '()\n', (79137, 79139), False, 'import vtk\n'), ((83497, 83518), 'numpy.vstack', 'np.vstack', (['all_points'], {}), '(all_points)\n', (83506, 83518), True, 'import numpy as np\n'), ((83699, 83744), 'pyNastran.gui.utils.vtk.vtk_utils.numpy_to_vtk_points', 'numpy_to_vtk_points', (['all_points_array'], {'deep': '(0)'}), '(all_points_array, deep=0)\n', (83718, 83744), False, 'from pyNastran.gui.utils.vtk.vtk_utils import get_numpy_idtype_for_vtk, numpy_to_vtk_points, create_vtk_cells_of_constant_element_type\n'), ((84584, 84599), 'vtk.vtkPoints', 'vtk.vtkPoints', ([], {}), '()\n', (84597, 84599), False, 'import vtk\n'), ((86949, 86966), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (86960, 86966), False, 'from collections import defaultdict, OrderedDict\n'), ((86992, 87009), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (87003, 87009), False, 'from collections import defaultdict, OrderedDict\n'), ((88146, 88178), 'itertools.chain', 'chain', (['model.spcs', 'model.spcadds'], {}), '(model.spcs, model.spcadds)\n', (88151, 88178), False, 'from itertools import chain\n'), ((88542, 88574), 'itertools.chain', 'chain', (['model.mpcs', 
'model.mpcadds'], {}), '(model.mpcs, model.mpcadds)\n', (88547, 88574), False, 'from itertools import chain\n'), ((94661, 94680), 'numpy.unique', 'np.unique', (['node_ids'], {}), '(node_ids)\n', (94670, 94680), True, 'import numpy as np\n'), ((100035, 100066), 'numpy.asarray', 'np.asarray', (['eids'], {'dtype': '"""int32"""'}), "(eids, dtype='int32')\n", (100045, 100066), True, 'import numpy as np\n'), ((100288, 100314), 'pyNastran.gui.utils.vtk.vtk_utils.numpy_to_vtk_points', 'numpy_to_vtk_points', (['nodes'], {}), '(nodes)\n', (100307, 100314), False, 'from pyNastran.gui.utils.vtk.vtk_utils import get_numpy_idtype_for_vtk, numpy_to_vtk_points, create_vtk_cells_of_constant_element_type\n'), ((100446, 100510), 'pyNastran.gui.utils.vtk.vtk_utils.create_vtk_cells_of_constant_element_type', 'create_vtk_cells_of_constant_element_type', (['grid', 'elements', 'etype'], {}), '(grid, elements, etype)\n', (100487, 100510), False, 'from pyNastran.gui.utils.vtk.vtk_utils import get_numpy_idtype_for_vtk, numpy_to_vtk_points, create_vtk_cells_of_constant_element_type\n'), ((101442, 101473), 'numpy.array', 'np.array', (['lines2'], {'dtype': '"""int32"""'}), "(lines2, dtype='int32')\n", (101450, 101473), True, 'import numpy as np\n'), ((101530, 101552), 'numpy.unique', 'np.unique', (['lines[:, 1]'], {}), '(lines[:, 1])\n', (101539, 101552), True, 'import numpy as np\n'), ((102566, 102581), 'vtk.vtkPoints', 'vtk.vtkPoints', ([], {}), '()\n', (102579, 102581), False, 'import vtk\n'), ((104706, 104721), 'vtk.vtkPoints', 'vtk.vtkPoints', ([], {}), '()\n', (104719, 104721), False, 'import vtk\n'), ((105891, 105906), 'vtk.vtkPoints', 'vtk.vtkPoints', ([], {}), '()\n', (105904, 105906), False, 'import vtk\n'), ((110410, 110444), 'numpy.zeros', 'np.zeros', (['nelements'], {'dtype': '"""int32"""'}), "(nelements, dtype='int32')\n", (110418, 110444), True, 'import numpy as np\n'), ((110466, 110500), 'numpy.zeros', 'np.zeros', (['nelements'], {'dtype': '"""int32"""'}), "(nelements, 
dtype='int32')\n", (110474, 110500), True, 'import numpy as np\n'), ((110522, 110559), 'numpy.full', 'np.full', (['nelements', '(-1)'], {'dtype': '"""int32"""'}), "(nelements, -1, dtype='int32')\n", (110529, 110559), True, 'import numpy as np\n'), ((110591, 110634), 'numpy.full', 'np.full', (['nelements', 'np.nan'], {'dtype': '"""float32"""'}), "(nelements, np.nan, dtype='float32')\n", (110598, 110634), True, 'import numpy as np\n'), ((110655, 110692), 'numpy.full', 'np.full', (['nelements', '(-1)'], {'dtype': '"""int32"""'}), "(nelements, -1, dtype='int32')\n", (110662, 110692), True, 'import numpy as np\n'), ((110716, 110753), 'numpy.full', 'np.full', (['nelements', '(-1)'], {'dtype': '"""int32"""'}), "(nelements, -1, dtype='int32')\n", (110723, 110753), True, 'import numpy as np\n'), ((110802, 110832), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (110810, 110832), True, 'import numpy as np\n'), ((110862, 110892), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (110870, 110892), True, 'import numpy as np\n'), ((110916, 110946), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (110924, 110946), True, 'import numpy as np\n'), ((110972, 111002), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (110980, 111002), True, 'import numpy as np\n'), ((111028, 111058), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (111036, 111058), True, 'import numpy as np\n'), ((111086, 111116), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (111094, 111116), True, 'import numpy as np\n'), ((111132, 111162), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (111140, 111162), True, 'import numpy as np\n'), ((111184, 111214), 'numpy.zeros', 'np.zeros', (['nelements', 
'"""float32"""'], {}), "(nelements, 'float32')\n", (111192, 111214), True, 'import numpy as np\n'), ((111237, 111267), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (111245, 111267), True, 'import numpy as np\n'), ((111294, 111324), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (111302, 111324), True, 'import numpy as np\n'), ((111343, 111385), 'numpy.full', 'np.full', (['(nelements, 3)', 'np.nan', '"""float32"""'], {}), "((nelements, 3), np.nan, 'float32')\n", (111350, 111385), True, 'import numpy as np\n'), ((111468, 111494), 'pyNastran.gui.utils.vtk.vtk_utils.get_numpy_idtype_for_vtk', 'get_numpy_idtype_for_vtk', ([], {}), '()\n', (111492, 111494), False, 'from pyNastran.gui.utils.vtk.vtk_utils import get_numpy_idtype_for_vtk, numpy_to_vtk_points, create_vtk_cells_of_constant_element_type\n'), ((111523, 111555), 'numpy.zeros', 'np.zeros', (['nelements'], {'dtype': 'dtype'}), '(nelements, dtype=dtype)\n', (111531, 111555), True, 'import numpy as np\n'), ((111585, 111617), 'numpy.zeros', 'np.zeros', (['nelements'], {'dtype': 'dtype'}), '(nelements, dtype=dtype)\n', (111593, 111617), True, 'import numpy as np\n'), ((128323, 128355), 'numpy.array', 'np.array', (['nids_list'], {'dtype': 'dtype'}), '(nids_list, dtype=dtype)\n', (128331, 128355), True, 'import numpy as np\n'), ((128998, 129041), 'pyNastran.gui.utils.vtk.base_utils.numpy_to_vtkIdTypeArray', 'numpy_to_vtkIdTypeArray', (['nids_array'], {'deep': '(1)'}), '(nids_array, deep=1)\n', (129021, 129041), False, 'from pyNastran.gui.utils.vtk.base_utils import numpy_to_vtk, numpy_to_vtkIdTypeArray\n'), ((129062, 129080), 'vtk.vtkCellArray', 'vtk.vtkCellArray', ([], {}), '()\n', (129078, 129080), False, 'import vtk\n'), ((129329, 129400), 'pyNastran.gui.utils.vtk.base_utils.numpy_to_vtk', 'numpy_to_vtk', (['cell_offsets_array'], {'deep': 'deep', 'array_type': 'vtk.VTK_ID_TYPE'}), '(cell_offsets_array, deep=deep, 
array_type=vtk.VTK_ID_TYPE)\n', (129341, 129400), False, 'from pyNastran.gui.utils.vtk.base_utils import numpy_to_vtk, numpy_to_vtkIdTypeArray\n'), ((129797, 129810), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (129808, 129810), False, 'from collections import defaultdict, OrderedDict\n'), ((130094, 130119), 'numpy.array', 'np.array', (['nid_cp_cd[:, 2]'], {}), '(nid_cp_cd[:, 2])\n', (130102, 130119), True, 'import numpy as np\n'), ((130175, 130356), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['subcase_id', '"""NodeID"""', '"""NodeID"""', '"""node"""', 'all_nids'], {'mask_value': '(0)', 'nlabels': 'None', 'labelsize': 'None', 'ncolors': 'None', 'colormap': 'colormap', 'data_format': 'None', 'uname': '"""GuiResult"""'}), "(subcase_id, 'NodeID', 'NodeID', 'node', all_nids, mask_value=0,\n nlabels=None, labelsize=None, ncolors=None, colormap=colormap,\n data_format=None, uname='GuiResult')\n", (130184, 130356), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((130953, 131147), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['subcase_id', '"""ElementID"""', '"""ElementID"""', '"""centroid"""', 'eids_array'], {'mask_value': '(0)', 'nlabels': 'None', 'labelsize': 'None', 'ncolors': 'None', 'colormap': 'colormap', 'data_format': 'None', 'uname': '"""GuiResult"""'}), "(subcase_id, 'ElementID', 'ElementID', 'centroid', eids_array,\n mask_value=0, nlabels=None, labelsize=None, ncolors=None, colormap=\n colormap, data_format=None, uname='GuiResult')\n", (130962, 131147), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((135523, 135554), 'pyNastran.femutils.utils.underflow_norm', 'underflow_norm', (['normals'], {'axis': '(1)'}), '(normals, axis=1)\n', (135537, 135554), False, 'from pyNastran.femutils.utils import duplicates, is_monotonic, underflow_norm\n'), ((135671, 135691), 'numpy.isnan', 'np.isnan', (['normal_mag'], {}), '(normal_mag)\n', 
(135679, 135691), True, 'import numpy as np\n'), ((136561, 136601), 'pyNastran.femutils.nan.isfinite_and_nonzero', 'isfinite_and_nonzero', (['max_interior_angle'], {}), '(max_interior_angle)\n', (136581, 136601), False, 'from pyNastran.femutils.nan import isfinite, isfinite_and_greater_than, isfinite_and_nonzero, isgreater_int\n'), ((145915, 145944), 'pyNastran.femutils.nan.isgreater_int', 'isgreater_int', (['mcid_array', '(-1)'], {}), '(mcid_array, -1)\n', (145928, 145944), False, 'from pyNastran.femutils.nan import isfinite, isfinite_and_greater_than, isfinite_and_nonzero, isgreater_int\n'), ((146320, 146350), 'pyNastran.femutils.nan.isfinite', 'isfinite', (['material_theta_array'], {}), '(material_theta_array)\n', (146328, 146350), False, 'from pyNastran.femutils.nan import isfinite, isfinite_and_greater_than, isfinite_and_nonzero, isgreater_int\n'), ((148955, 148968), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (148966, 148968), False, 'from collections import defaultdict, OrderedDict\n'), ((153193, 153250), 'pyNastran.bdf.mesh_utils.export_mcids.export_mcids_all', 'export_mcids_all', (['model'], {'eids': 'None', 'log': 'None', 'debug': '(False)'}), '(model, eids=None, log=None, debug=False)\n', (153209, 153250), False, 'from pyNastran.bdf.mesh_utils.export_mcids import export_mcids_all\n'), ((156695, 156723), 'numpy.zeros', 'np.zeros', (['nelements', '"""int32"""'], {}), "(nelements, 'int32')\n", (156703, 156723), True, 'import numpy as np\n'), ((156749, 156786), 'numpy.full', 'np.full', (['nelements', '(-1)'], {'dtype': '"""int32"""'}), "(nelements, -1, dtype='int32')\n", (156756, 156786), True, 'import numpy as np\n'), ((156812, 156855), 'numpy.full', 'np.full', (['nelements', 'np.nan'], {'dtype': '"""float32"""'}), "(nelements, np.nan, dtype='float32')\n", (156819, 156855), True, 'import numpy as np\n'), ((158976, 158993), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (158987, 158993), False, 'from collections import 
defaultdict, OrderedDict\n'), ((195054, 195082), 'numpy.zeros', 'np.zeros', (['nelements', '"""int32"""'], {}), "(nelements, 'int32')\n", (195062, 195082), True, 'import numpy as np\n'), ((195108, 195145), 'numpy.full', 'np.full', (['nelements', '(-1)'], {'dtype': '"""int32"""'}), "(nelements, -1, dtype='int32')\n", (195115, 195145), True, 'import numpy as np\n'), ((195171, 195214), 'numpy.full', 'np.full', (['nelements', 'np.nan'], {'dtype': '"""float32"""'}), "(nelements, np.nan, dtype='float32')\n", (195178, 195214), True, 'import numpy as np\n'), ((195244, 195274), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (195252, 195274), True, 'import numpy as np\n'), ((195304, 195334), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (195312, 195334), True, 'import numpy as np\n'), ((195358, 195388), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (195366, 195388), True, 'import numpy as np\n'), ((195414, 195444), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (195422, 195444), True, 'import numpy as np\n'), ((195470, 195500), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (195478, 195500), True, 'import numpy as np\n'), ((195528, 195558), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (195536, 195558), True, 'import numpy as np\n'), ((195574, 195604), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (195582, 195604), True, 'import numpy as np\n'), ((195626, 195656), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (195634, 195656), True, 'import numpy as np\n'), ((195679, 195709), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (195687, 195709), True, 'import numpy as 
np\n'), ((195736, 195766), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (195744, 195766), True, 'import numpy as np\n'), ((197806, 197823), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (197817, 197823), False, 'from collections import defaultdict, OrderedDict\n'), ((236841, 236946), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""PropertyID"""', 'title': '"""PropertyID"""', 'location': '"""centroid"""', 'scalar': 'pids', 'mask_value': '(0)'}), "(0, header='PropertyID', title='PropertyID', location='centroid',\n scalar=pids, mask_value=0)\n", (236850, 236946), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((237106, 237121), 'numpy.unique', 'np.unique', (['pids'], {}), '(pids)\n', (237115, 237121), True, 'import numpy as np\n'), ((238198, 238237), 'numpy.zeros', 'np.zeros', (['(nelements, 4)'], {'dtype': '"""int32"""'}), "((nelements, 4), dtype='int32')\n", (238206, 238237), True, 'import numpy as np\n'), ((238258, 238306), 'numpy.full', 'np.full', (['(nelements, 4)', 'np.nan'], {'dtype': '"""float32"""'}), "((nelements, 4), np.nan, dtype='float32')\n", (238265, 238306), True, 'import numpy as np\n'), ((242770, 242859), 'pyNastran.bdf.mesh_utils.forces_moments.get_pressure_array', 'get_pressure_array', (['model', 'load_case_id'], {'eids': 'self.element_ids', 'stop_on_failure': '(False)'}), '(model, load_case_id, eids=self.element_ids,\n stop_on_failure=False)\n', (242788, 242859), False, 'from pyNastran.bdf.mesh_utils.forces_moments import get_load_arrays, get_pressure_array\n'), ((254902, 254919), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (254913, 254919), False, 'from collections import defaultdict, OrderedDict\n'), ((254942, 254959), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (254953, 254959), False, 'from collections import defaultdict, OrderedDict\n'), 
((254982, 254999), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (254993, 254999), False, 'from collections import defaultdict, OrderedDict\n'), ((255021, 255038), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (255032, 255038), False, 'from collections import defaultdict, OrderedDict\n'), ((255068, 255085), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (255079, 255085), False, 'from collections import defaultdict, OrderedDict\n'), ((255110, 255127), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (255121, 255127), False, 'from collections import defaultdict, OrderedDict\n'), ((266523, 266565), 'pyNastran.femutils.nan.isfinite_and_greater_than', 'isfinite_and_greater_than', (['element_dim', '(-1)'], {}), '(element_dim, -1)\n', (266548, 266565), False, 'from pyNastran.femutils.nan import isfinite, isfinite_and_greater_than, isfinite_and_nonzero, isgreater_int\n'), ((266589, 266702), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""ElementDim"""', 'title': '"""ElementDim"""', 'location': '"""centroid"""', 'scalar': 'element_dim', 'mask_value': '(-1)'}), "(0, header='ElementDim', title='ElementDim', location='centroid',\n scalar=element_dim, mask_value=-1)\n", (266598, 266702), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((266908, 266925), 'pyNastran.femutils.nan.isfinite', 'isfinite', (['normals'], {}), '(normals)\n', (266916, 266925), False, 'from pyNastran.femutils.nan import isfinite, isfinite_and_greater_than, isfinite_and_nonzero, isgreater_int\n'), ((267051, 267091), 'pyNastran.femutils.nan.isfinite_and_nonzero', 'isfinite_and_nonzero', (['min_interior_angle'], {}), '(min_interior_angle)\n', (267071, 267091), False, 'from pyNastran.femutils.nan import isfinite, isfinite_and_greater_than, isfinite_and_nonzero, isgreater_int\n'), ((267104, 267144), 
'pyNastran.femutils.nan.isfinite_and_nonzero', 'isfinite_and_nonzero', (['max_interior_angle'], {}), '(max_interior_angle)\n', (267124, 267144), False, 'from pyNastran.femutils.nan import isfinite, isfinite_and_greater_than, isfinite_and_nonzero, isgreater_int\n'), ((277397, 277538), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""MaterialCoord"""', 'title': '"""MaterialCoord"""', 'location': '"""centroid"""', 'scalar': 'material_coord', 'mask_value': '(-1)', 'data_format': '"""%i"""'}), "(0, header='MaterialCoord', title='MaterialCoord', location=\n 'centroid', scalar=material_coord, mask_value=-1, data_format='%i')\n", (277406, 277538), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((277769, 277897), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""MaterialTheta"""', 'title': '"""MaterialTheta"""', 'location': '"""centroid"""', 'scalar': 'material_theta', 'data_format': '"""%.3f"""'}), "(0, header='MaterialTheta', title='MaterialTheta', location=\n 'centroid', scalar=material_theta, data_format='%.3f')\n", (277778, 277897), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((291429, 291445), 'numpy.empty', 'np.empty', (['(0, 3)'], {}), '((0, 3))\n', (291437, 291445), True, 'import numpy as np\n'), ((22486, 22508), 'pyNastran.femutils.utils.is_monotonic', 'is_monotonic', (['all_nids'], {}), '(all_nids)\n', (22498, 22508), False, 'from pyNastran.femutils.utils import duplicates, is_monotonic, underflow_norm\n'), ((22710, 22730), 'numpy.argsort', 'np.argsort', (['all_nids'], {}), '(all_nids)\n', (22720, 22730), True, 'import numpy as np\n'), ((24627, 24689), 'pyNastran.op2.op2_geom.OP2Geom', 'OP2Geom', ([], {'make_geom': '(True)', 'debug': '(False)', 'log': 'log', 'debug_file': 'None'}), '(make_geom=True, debug=False, log=log, debug_file=None)\n', (24634, 24689), False, 'from pyNastran.op2.op2_geom import 
OP2Geom\n'), ((29764, 29779), 'pyNastran.gui.errors.NoGeometry', 'NoGeometry', (['msg'], {}), '(msg)\n', (29774, 29779), False, 'from pyNastran.gui.errors import NoGeometry, NoSuperelements\n'), ((30180, 30248), 'pyNastran.gui.errors.NoSuperelements', 'NoSuperelements', (['"""superelements are not supported in vectorized BDF"""'], {}), "('superelements are not supported in vectorized BDF')\n", (30195, 30248), False, 'from pyNastran.gui.errors import NoGeometry, NoSuperelements\n'), ((30482, 30497), 'pyNastran.gui.errors.NoGeometry', 'NoGeometry', (['msg'], {}), '(msg)\n', (30492, 30497), False, 'from pyNastran.gui.errors import NoGeometry, NoSuperelements\n'), ((33012, 33193), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['subcase_id', '"""NodeID"""', '"""NodeID"""', '"""node"""', 'all_nids'], {'mask_value': '(0)', 'nlabels': 'None', 'labelsize': 'None', 'ncolors': 'None', 'colormap': 'colormap', 'data_format': 'None', 'uname': '"""GuiResult"""'}), "(subcase_id, 'NodeID', 'NodeID', 'node', all_nids, mask_value=0,\n nlabels=None, labelsize=None, ncolors=None, colormap=colormap,\n data_format=None, uname='GuiResult')\n", (33021, 33193), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((34209, 34234), 'numpy.array', 'np.array', (['nid_cp_cd[:, 2]'], {}), '(nid_cp_cd[:, 2])\n', (34217, 34234), True, 'import numpy as np\n'), ((34651, 34845), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['subcase_id', '"""ElementID"""', '"""ElementID"""', '"""centroid"""', 'eids_array'], {'mask_value': '(0)', 'nlabels': 'None', 'labelsize': 'None', 'ncolors': 'None', 'colormap': 'colormap', 'data_format': 'None', 'uname': '"""GuiResult"""'}), "(subcase_id, 'ElementID', 'ElementID', 'centroid', eids_array,\n mask_value=0, nlabels=None, labelsize=None, ncolors=None, colormap=\n colormap, data_format=None, uname='GuiResult')\n", (34660, 34845), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, 
NormalResult\n'), ((37413, 37524), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""PropertyID"""', 'title': '"""PropertyID"""', 'location': '"""centroid"""', 'scalar': 'pids_array', 'mask_value': '(0)'}), "(0, header='PropertyID', title='PropertyID', location='centroid',\n scalar=pids_array, mask_value=0)\n", (37422, 37524), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((38337, 38381), 'numpy.zeros', 'np.zeros', (['(nelements, nplies)'], {'dtype': '"""int32"""'}), "((nelements, nplies), dtype='int32')\n", (38345, 38381), True, 'import numpy as np\n'), ((38406, 38459), 'numpy.full', 'np.full', (['(nelements, nplies)', 'np.nan'], {'dtype': '"""float32"""'}), "((nelements, nplies), np.nan, dtype='float32')\n", (38413, 38459), True, 'import numpy as np\n'), ((38554, 38588), 'numpy.zeros', 'np.zeros', (['nelements'], {'dtype': '"""int32"""'}), "(nelements, dtype='int32')\n", (38562, 38588), True, 'import numpy as np\n'), ((38634, 38655), 'numpy.unique', 'np.unique', (['pids_array'], {}), '(pids_array)\n', (38643, 38655), True, 'import numpy as np\n'), ((39939, 40048), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""iProperty"""', 'title': '"""iProperty"""', 'location': '"""centroid"""', 'scalar': 'ipids', 'colormap': 'colormap'}), "(0, header='iProperty', title='iProperty', location='centroid',\n scalar=ipids, colormap=colormap)\n", (39948, 40048), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((48506, 48569), 'pyNastran.dev.bdf_vectorized2.op2_geom_vectorized.OP2Geom', 'OP2Geom_', ([], {'make_geom': '(True)', 'debug': '(False)', 'log': 'log', 'debug_file': 'None'}), '(make_geom=True, debug=False, log=log, debug_file=None)\n', (48514, 48569), True, 'from pyNastran.dev.bdf_vectorized2.op2_geom_vectorized import OP2Geom as OP2Geom_\n'), ((48824, 48849), 'pyNastran.dev.bdf_vectorized2.bdf_vectorized.BDF', 'BDF_', 
([], {'log': 'log', 'debug': '(True)'}), '(log=log, debug=True)\n', (48828, 48849), True, 'from pyNastran.dev.bdf_vectorized2.bdf_vectorized import BDF as BDF_\n'), ((52817, 52832), 'pyNastran.gui.errors.NoGeometry', 'NoGeometry', (['msg'], {}), '(msg)\n', (52827, 52832), False, 'from pyNastran.gui.errors import NoGeometry, NoSuperelements\n'), ((53585, 53600), 'pyNastran.gui.errors.NoGeometry', 'NoGeometry', (['msg'], {}), '(msg)\n', (53595, 53600), False, 'from pyNastran.gui.errors import NoGeometry, NoSuperelements\n'), ((57284, 57318), 'qtpy.QtWidgets.QDockWidget', 'QDockWidget', (['"""Nastran Model"""', 'self'], {}), "('Nastran Model', self)\n", (57295, 57318), False, 'from qtpy.QtWidgets import QDockWidget\n'), ((71706, 71722), 'numpy.empty', 'np.empty', (['(0, 3)'], {}), '((0, 3))\n', (71714, 71722), True, 'import numpy as np\n'), ((82530, 82539), 'vtk.vtkQuad', 'vtkQuad', ([], {}), '()\n', (82537, 82539), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((83975, 84019), 'numpy.average', 'np.average', (['centroids'], {'weights': 'areas', 'axis': '(0)'}), '(centroids, weights=areas, axis=0)\n', (83985, 84019), True, 'import numpy as np\n'), ((89069, 89123), 'pyNastran.bdf.mesh_utils.mpc_dependency.get_mpc_node_ids', 'get_mpc_node_ids', (['model', 'mpc_id'], {'stop_on_failure': '(False)'}), '(model, mpc_id, stop_on_failure=False)\n', (89085, 89123), False, 'from pyNastran.bdf.mesh_utils.mpc_dependency import get_mpc_node_ids\n'), ((95293, 95309), 'collections.defaultdict', 'defaultdict', (['str'], {}), '(str)\n', (95304, 95309), False, 'from collections import defaultdict, OrderedDict\n'), ((97445, 97464), 'numpy.hstack', 'np.hstack', (['bar_eids'], {}), '(bar_eids)\n', (97454, 97464), True, 'import numpy as np\n'), ((97485, 97528), 'numpy.searchsorted', 'np.searchsorted', (['self.element_ids', 'bar_eids'], {}), 
'(self.element_ids, bar_eids)\n', (97500, 97528), True, 'import numpy as np\n'), ((103345, 103360), 'vtk.vtkVertex', 'vtk.vtkVertex', ([], {}), '()\n', (103358, 103360), False, 'import vtk\n'), ((105344, 105359), 'vtk.vtkVertex', 'vtk.vtkVertex', ([], {}), '()\n', (105357, 105359), False, 'import vtk\n'), ((106817, 106830), 'vtk.vtkLine', 'vtk.vtkLine', ([], {}), '()\n', (106828, 106830), False, 'import vtk\n'), ((127024, 127055), 'numpy.where', 'np.where', (['(cell_types_array == 0)'], {}), '(cell_types_array == 0)\n', (127032, 127055), True, 'import numpy as np\n'), ((130706, 130780), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""NodeCd"""', 'title': '"""NodeCd"""', 'location': '"""node"""', 'scalar': 'cds'}), "(0, header='NodeCd', title='NodeCd', location='node', scalar=cds)\n", (130715, 130780), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((132185, 132384), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['subcase_id', '"""NNodes/Elem"""', '"""NNodes/Elem"""', '"""centroid"""', 'nnodes_array'], {'mask_value': '(0)', 'nlabels': 'None', 'labelsize': 'None', 'ncolors': 'None', 'colormap': 'colormap', 'data_format': 'None', 'uname': '"""GuiResult"""'}), "(subcase_id, 'NNodes/Elem', 'NNodes/Elem', 'centroid',\n nnodes_array, mask_value=0, nlabels=None, labelsize=None, ncolors=None,\n colormap=colormap, data_format=None, uname='GuiResult')\n", (132194, 132384), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((136709, 136824), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""NormalX"""', 'title': '"""NormalX"""', 'location': '"""centroid"""', 'scalar': 'normals[:, 0]', 'data_format': '"""%.2f"""'}), "(0, header='NormalX', title='NormalX', location='centroid', scalar\n =normals[:, 0], data_format='%.2f')\n", (136718, 136824), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, 
NormalResult\n'), ((136874, 136989), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""NormalY"""', 'title': '"""NormalY"""', 'location': '"""centroid"""', 'scalar': 'normals[:, 1]', 'data_format': '"""%.2f"""'}), "(0, header='NormalY', title='NormalY', location='centroid', scalar\n =normals[:, 1], data_format='%.2f')\n", (136883, 136989), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((137039, 137154), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""NormalZ"""', 'title': '"""NormalZ"""', 'location': '"""centroid"""', 'scalar': 'normals[:, 2]', 'data_format': '"""%.2f"""'}), "(0, header='NormalZ', title='NormalZ', location='centroid', scalar\n =normals[:, 2], data_format='%.2f')\n", (137048, 137154), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((137206, 137343), 'pyNastran.gui.gui_objects.gui_result.NormalResult', 'NormalResult', (['(0)', '"""Normals"""', '"""Normals"""'], {'nlabels': '(2)', 'labelsize': '(5)', 'ncolors': '(2)', 'colormap': 'colormap', 'data_format': '"""%.1f"""', 'uname': '"""NormalResult"""'}), "(0, 'Normals', 'Normals', nlabels=2, labelsize=5, ncolors=2,\n colormap=colormap, data_format='%.1f', uname='NormalResult')\n", (137218, 137343), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((137473, 137548), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""Area"""', 'title': '"""Area"""', 'location': '"""centroid"""', 'scalar': 'area'}), "(0, header='Area', title='Area', location='centroid', scalar=area)\n", (137482, 137548), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((137616, 137729), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""Min Edge Length"""', 'title': '"""Min Edge Length"""', 'location': '"""centroid"""', 'scalar': 
'min_edge_length'}), "(0, header='Min Edge Length', title='Min Edge Length', location=\n 'centroid', scalar=min_edge_length)\n", (137625, 137729), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((138347, 138373), 'numpy.degrees', 'np.degrees', (['max_skew_angle'], {}), '(max_skew_angle)\n', (138357, 138373), True, 'import numpy as np\n'), ((138397, 138495), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""Max Skew Angle"""', 'title': '"""MaxSkewAngle"""', 'location': '"""centroid"""', 'scalar': 'skew'}), "(0, header='Max Skew Angle', title='MaxSkewAngle', location=\n 'centroid', scalar=skew)\n", (138406, 138495), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((138549, 138656), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""Aspect Ratio"""', 'title': '"""AspectRatio"""', 'location': '"""centroid"""', 'scalar': 'max_aspect_ratio'}), "(0, header='Aspect Ratio', title='AspectRatio', location=\n 'centroid', scalar=max_aspect_ratio)\n", (138558, 138656), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((141643, 141679), 'pyNastran.femutils.nan.isfinite_and_nonzero', 'isfinite_and_nonzero', (['max_warp_angle'], {}), '(max_warp_angle)\n', (141663, 141679), False, 'from pyNastran.femutils.nan import isfinite, isfinite_and_greater_than, isfinite_and_nonzero, isgreater_int\n'), ((145979, 146116), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""MaterialCoord"""', 'title': '"""MaterialCoord"""', 'location': '"""centroid"""', 'scalar': 'mcid_array', 'mask_value': '(-1)', 'data_format': '"""%i"""'}), "(0, header='MaterialCoord', title='MaterialCoord', location=\n 'centroid', scalar=mcid_array, mask_value=-1, data_format='%i')\n", (145988, 146116), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((146385, 
146519), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""MaterialTheta"""', 'title': '"""MaterialTheta"""', 'location': '"""centroid"""', 'scalar': 'material_theta_array', 'data_format': '"""%.3f"""'}), "(0, header='MaterialTheta', title='MaterialTheta', location=\n 'centroid', scalar=material_theta_array, data_format='%.3f')\n", (146394, 146519), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((146857, 146882), 'pyNastran.femutils.nan.isfinite', 'isfinite', (['min_edge_length'], {}), '(min_edge_length)\n', (146865, 146882), False, 'from pyNastran.femutils.nan import isfinite, isfinite_and_greater_than, isfinite_and_nonzero, isgreater_int\n'), ((146915, 146942), 'numpy.nanmean', 'np.nanmean', (['min_edge_length'], {}), '(min_edge_length)\n', (146925, 146942), True, 'import numpy as np\n'), ((149328, 149353), 'numpy.array', 'np.array', (['nid_cp_cd[:, 0]'], {}), '(nid_cp_cd[:, 0])\n', (149336, 149353), True, 'import numpy as np\n'), ((149372, 149397), 'numpy.array', 'np.array', (['nid_cp_cd[:, 2]'], {}), '(nid_cp_cd[:, 2])\n', (149380, 149397), True, 'import numpy as np\n'), ((149421, 149496), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""NodeID"""', 'title': '"""NodeID"""', 'location': '"""node"""', 'scalar': 'nids'}), "(0, header='NodeID', title='NodeID', location='node', scalar=nids)\n", (149430, 149496), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((150129, 150163), 'numpy.zeros', 'np.zeros', (['nelements'], {'dtype': '"""int32"""'}), "(nelements, dtype='int32')\n", (150137, 150163), True, 'import numpy as np\n'), ((150307, 150410), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""ElementID"""', 'title': '"""ElementID"""', 'location': '"""centroid"""', 'scalar': 'eids', 'mask_value': '(0)'}), "(0, header='ElementID', title='ElementID', location='centroid',\n 
scalar=eids, mask_value=0)\n", (150316, 150410), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((150665, 150774), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""SuperelementID"""', 'title': '"""SuperelementID"""', 'location': '"""centroid"""', 'scalar': 'superelements'}), "(0, header='SuperelementID', title='SuperelementID', location=\n 'centroid', scalar=superelements)\n", (150674, 150774), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((152194, 152219), 'pyNastran.femutils.nan.isfinite', 'isfinite', (['min_edge_length'], {}), '(min_edge_length)\n', (152202, 152219), False, 'from pyNastran.femutils.nan import isfinite, isfinite_and_greater_than, isfinite_and_nonzero, isgreater_int\n'), ((153952, 153985), 'numpy.array', 'np.array', (['nodesi'], {'dtype': '"""float32"""'}), "(nodesi, dtype='float32')\n", (153960, 153985), True, 'import numpy as np\n'), ((154009, 154039), 'numpy.array', 'np.array', (['barsi'], {'dtype': '"""int32"""'}), "(barsi, dtype='int32')\n", (154017, 154039), True, 'import numpy as np\n'), ((154116, 154181), 'pyNastran.gui.utils.vtk.vtk_utils.numpy_to_vtk_points', 'numpy_to_vtk_points', (['nodes_array'], {'points': 'None', 'dtype': '"""<f"""', 'deep': '(1)'}), "(nodes_array, points=None, dtype='<f', deep=1)\n", (154135, 154181), False, 'from pyNastran.gui.utils.vtk.vtk_utils import get_numpy_idtype_for_vtk, numpy_to_vtk_points, create_vtk_cells_of_constant_element_type\n'), ((154229, 154293), 'pyNastran.gui.utils.vtk.vtk_utils.create_vtk_cells_of_constant_element_type', 'create_vtk_cells_of_constant_element_type', (['grid', 'elements', 'etype'], {}), '(grid, elements, etype)\n', (154270, 154293), False, 'from pyNastran.gui.utils.vtk.vtk_utils import get_numpy_idtype_for_vtk, numpy_to_vtk_points, create_vtk_cells_of_constant_element_type\n'), ((154794, 154824), 'numpy.array', 'np.array', (['lines'], {'dtype': '"""int32"""'}), 
"(lines, dtype='int32')\n", (154802, 154824), True, 'import numpy as np\n'), ((236485, 236519), 'pyNastran.femutils.nan.isfinite_and_greater_than', 'isfinite_and_greater_than', (['pids', '(0)'], {}), '(pids, 0)\n', (236510, 236519), False, 'from pyNastran.femutils.nan import isfinite, isfinite_and_greater_than, isfinite_and_nonzero, isgreater_int\n'), ((237823, 237857), 'numpy.zeros', 'np.zeros', (['nelements'], {'dtype': '"""int32"""'}), "(nelements, dtype='int32')\n", (237831, 237857), True, 'import numpy as np\n'), ((237883, 237928), 'numpy.zeros', 'np.zeros', (['(nelements, npliesi)'], {'dtype': '"""int32"""'}), "((nelements, npliesi), dtype='int32')\n", (237891, 237928), True, 'import numpy as np\n'), ((237959, 238013), 'numpy.full', 'np.full', (['(nelements, npliesi)', 'np.nan'], {'dtype': '"""float32"""'}), "((nelements, npliesi), np.nan, dtype='float32')\n", (237966, 238013), True, 'import numpy as np\n'), ((238039, 238084), 'numpy.zeros', 'np.zeros', (['(nelements, npliesi)'], {'dtype': '"""int32"""'}), "((nelements, npliesi), dtype='int32')\n", (238047, 238084), True, 'import numpy as np\n'), ((240741, 240765), 'numpy.hstack', 'np.hstack', (['mid_eids_skip'], {}), '(mid_eids_skip)\n', (240750, 240765), True, 'import numpy as np\n'), ((243153, 243255), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['subcase_id'], {'header': '"""Pressure"""', 'title': '"""Pressure"""', 'location': '"""centroid"""', 'scalar': 'pressures'}), "(subcase_id, header='Pressure', title='Pressure', location=\n 'centroid', scalar=pressures)\n", (243162, 243255), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((244183, 244312), 'pyNastran.bdf.mesh_utils.forces_moments.get_load_arrays', 'get_load_arrays', (['model', 'subcase_id'], {'eid_map': 'self.eid_map', 'node_ids': 'self.node_ids', 'normals': 'self.normals', 'nid_map': 'self.nid_map'}), '(model, subcase_id, eid_map=self.eid_map, node_ids=self.\n node_ids, 
normals=self.normals, nid_map=self.nid_map)\n', (244198, 244312), False, 'from pyNastran.bdf.mesh_utils.forces_moments import get_load_arrays, get_pressure_array\n'), ((255682, 255740), 'pyNastran.op2.result_objects.stress_object.StressObject', 'StressObject', (['model', 'key', 'self.element_ids'], {'is_stress': '(True)'}), '(model, key, self.element_ids, is_stress=True)\n', (255694, 255740), False, 'from pyNastran.op2.result_objects.stress_object import StressObject\n'), ((255772, 255831), 'pyNastran.op2.result_objects.stress_object.StressObject', 'StressObject', (['model', 'key', 'self.element_ids'], {'is_stress': '(False)'}), '(model, key, self.element_ids, is_stress=False)\n', (255784, 255831), False, 'from pyNastran.op2.result_objects.stress_object import StressObject\n'), ((267287, 267402), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""NormalX"""', 'title': '"""NormalX"""', 'location': '"""centroid"""', 'scalar': 'normals[:, 0]', 'data_format': '"""%.2f"""'}), "(0, header='NormalX', title='NormalX', location='centroid', scalar\n =normals[:, 0], data_format='%.2f')\n", (267296, 267402), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((267452, 267567), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""NormalY"""', 'title': '"""NormalY"""', 'location': '"""centroid"""', 'scalar': 'normals[:, 1]', 'data_format': '"""%.2f"""'}), "(0, header='NormalY', title='NormalY', location='centroid', scalar\n =normals[:, 1], data_format='%.2f')\n", (267461, 267567), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((267617, 267732), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""NormalZ"""', 'title': '"""NormalZ"""', 'location': '"""centroid"""', 'scalar': 'normals[:, 2]', 'data_format': '"""%.2f"""'}), "(0, header='NormalZ', title='NormalZ', location='centroid', scalar\n 
=normals[:, 2], data_format='%.2f')\n", (267626, 267732), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((267784, 267921), 'pyNastran.gui.gui_objects.gui_result.NormalResult', 'NormalResult', (['(0)', '"""Normals"""', '"""Normals"""'], {'nlabels': '(2)', 'labelsize': '(5)', 'ncolors': '(2)', 'colormap': 'colormap', 'data_format': '"""%.1f"""', 'uname': '"""NormalResult"""'}), "(0, 'Normals', 'Normals', nlabels=2, labelsize=5, ncolors=2,\n colormap=colormap, data_format='%.1f', uname='NormalResult')\n", (267796, 267921), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((268098, 268173), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""Area"""', 'title': '"""Area"""', 'location': '"""centroid"""', 'scalar': 'area'}), "(0, header='Area', title='Area', location='centroid', scalar=area)\n", (268107, 268173), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((268241, 268354), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""Min Edge Length"""', 'title': '"""Min Edge Length"""', 'location': '"""centroid"""', 'scalar': 'min_edge_length'}), "(0, header='Min Edge Length', title='Min Edge Length', location=\n 'centroid', scalar=min_edge_length)\n", (268250, 268354), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((268972, 268998), 'numpy.degrees', 'np.degrees', (['max_skew_angle'], {}), '(max_skew_angle)\n', (268982, 268998), True, 'import numpy as np\n'), ((269022, 269120), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""Max Skew Angle"""', 'title': '"""MaxSkewAngle"""', 'location': '"""centroid"""', 'scalar': 'skew'}), "(0, header='Max Skew Angle', title='MaxSkewAngle', location=\n 'centroid', scalar=skew)\n", (269031, 269120), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, 
NormalResult\n'), ((269174, 269281), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""Aspect Ratio"""', 'title': '"""AspectRatio"""', 'location': '"""centroid"""', 'scalar': 'max_aspect_ratio'}), "(0, header='Aspect Ratio', title='AspectRatio', location=\n 'centroid', scalar=max_aspect_ratio)\n", (269183, 269281), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((269566, 269667), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""NNodes/Elem"""', 'title': '"""NNodes/Elem"""', 'location': '"""centroid"""', 'scalar': 'nnodes_array'}), "(0, header='NNodes/Elem', title='NNodes/Elem', location='centroid',\n scalar=nnodes_array)\n", (269575, 269667), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((272231, 272267), 'pyNastran.femutils.nan.isfinite_and_nonzero', 'isfinite_and_nonzero', (['max_warp_angle'], {}), '(max_warp_angle)\n', (272251, 272267), False, 'from pyNastran.femutils.nan import isfinite, isfinite_and_greater_than, isfinite_and_nonzero, isgreater_int\n'), ((274488, 274566), 'pyNastran.gui.gui_objects.displacements.ElementalTableResults', 'ElementalTableResults', (['subcase_id', 'titles', 'headers', 'xyz_offset', 'offset', 'scales'], {}), '(subcase_id, titles, headers, xyz_offset, offset, scales)\n', (274509, 274566), False, 'from pyNastran.gui.gui_objects.displacements import ForceTableResults, ElementalTableResults\n'), ((274861, 274958), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""X"""', 'title': '"""X"""', 'location': '"""node"""', 'scalar': 'xyz_cid0[:, 0]', 'data_format': '"""%g"""'}), "(0, header='X', title='X', location='node', scalar=xyz_cid0[:, 0],\n data_format='%g')\n", (274870, 274958), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((275008, 275105), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', 
(['(0)'], {'header': '"""Y"""', 'title': '"""Y"""', 'location': '"""node"""', 'scalar': 'xyz_cid0[:, 1]', 'data_format': '"""%g"""'}), "(0, header='Y', title='Y', location='node', scalar=xyz_cid0[:, 1],\n data_format='%g')\n", (275017, 275105), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((275155, 275252), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""Z"""', 'title': '"""Z"""', 'location': '"""node"""', 'scalar': 'xyz_cid0[:, 2]', 'data_format': '"""%g"""'}), "(0, header='Z', title='Z', location='node', scalar=xyz_cid0[:, 2],\n data_format='%g')\n", (275164, 275252), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((279505, 279537), 'pyNastran.femutils.nan.isfinite_and_nonzero', 'isfinite_and_nonzero', (['thicknessi'], {}), '(thicknessi)\n', (279525, 279537), False, 'from pyNastran.femutils.nan import isfinite, isfinite_and_greater_than, isfinite_and_nonzero, isgreater_int\n'), ((284931, 285043), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""DV Region"""', 'title': '"""DV Region"""', 'location': '"""centroid"""', 'scalar': 'design_region', 'mask_value': '(0)'}), "(0, header='DV Region', title='DV Region', location='centroid',\n scalar=design_region, mask_value=0)\n", (284940, 285043), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((285098, 285220), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': "('DVPREL Init - %s' % key)", 'title': "('DVPREL Init - %s' % key)", 'location': '"""centroid"""', 'scalar': 'dvprel_init'}), "(0, header='DVPREL Init - %s' % key, title='DVPREL Init - %s' %\n key, location='centroid', scalar=dvprel_init)\n", (285107, 285220), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((291331, 291354), 'numpy.vstack', 'np.vstack', (['caero_points'], {}), '(caero_points)\n', 
(291340, 291354), True, 'import numpy as np\n'), ((18925, 19087), 'pyNastran.bdf.mesh_utils.bdf_renumber.superelement_renumber', 'superelement_renumber', (['model'], {'bdf_filename_out': 'bdf_filename_out', 'size': '(8)', 'is_double': '(False)', 'starting_id_dict': 'None', 'cards_to_skip': 'None', 'log': 'None', 'debug': '(False)'}), '(model, bdf_filename_out=bdf_filename_out, size=8,\n is_double=False, starting_id_dict=None, cards_to_skip=None, log=None,\n debug=False)\n', (18946, 19087), False, 'from pyNastran.bdf.mesh_utils.bdf_renumber import superelement_renumber\n'), ((19168, 19204), 'pyNastran.bdf.bdf.BDF', 'BDF', ([], {'debug': 'None', 'log': 'log', 'mode': '"""msc"""'}), "(debug=None, log=log, mode='msc')\n", (19171, 19204), False, 'from pyNastran.bdf.bdf import BDF, CAERO1, CAERO2, CAERO3, CAERO4, CAERO5, CQUAD4, CQUAD8, CQUAD, CQUADR, CSHEAR, CTRIA3, CTRIA6, CTRIAR, CPLSTN3, CPLSTN4, CPLSTN6, CPLSTN8, CPLSTS3, CPLSTS4, CPLSTS6, CPLSTS8, CTRAX3, CTRIAX6, CTRIAX, CQUADX4, CQUADX8, CQUADX, CONM2\n'), ((22405, 22425), 'pyNastran.femutils.utils.duplicates', 'duplicates', (['all_nids'], {}), '(all_nids)\n', (22415, 22425), False, 'from pyNastran.femutils.utils import duplicates, is_monotonic, underflow_norm\n'), ((24902, 24926), 'pyNastran.bdf.bdf.BDF', 'BDF', ([], {'log': 'log', 'debug': '(True)'}), '(log=log, debug=True)\n', (24905, 24926), False, 'from pyNastran.bdf.bdf import BDF, CAERO1, CAERO2, CAERO3, CAERO4, CAERO5, CQUAD4, CQUAD8, CQUAD, CQUADR, CSHEAR, CTRIA3, CTRIA6, CTRIAR, CPLSTN3, CPLSTN4, CPLSTN6, CPLSTN8, CPLSTS3, CPLSTS4, CPLSTS6, CPLSTS8, CTRAX3, CTRIAX6, CTRIAX, CQUADX4, CQUADX8, CQUADX, CONM2\n'), ((34290, 34387), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""NodeCd"""', 'title': '"""NodeCd"""', 'location': '"""node"""', 'scalar': 'cds', 'colormap': 'colormap'}), "(0, header='NodeCd', title='NodeCd', location='node', scalar=cds,\n colormap=colormap)\n", (34299, 34387), False, 'from 
pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((35999, 36195), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['subcase_id', '"""ElementDim"""', '"""ElementDim"""', '"""centroid"""', 'dim_array'], {'mask_value': '(-1)', 'nlabels': 'None', 'labelsize': 'None', 'ncolors': 'None', 'colormap': 'colormap', 'data_format': 'None', 'uname': '"""GuiResult"""'}), "(subcase_id, 'ElementDim', 'ElementDim', 'centroid', dim_array,\n mask_value=-1, nlabels=None, labelsize=None, ncolors=None, colormap=\n colormap, data_format=None, uname='GuiResult')\n", (36008, 36195), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((36697, 36897), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['subcase_id', '"""NNodes/Elem"""', '"""NNodes/Elem"""', '"""centroid"""', 'nnodes_array'], {'mask_value': '(-1)', 'nlabels': 'None', 'labelsize': 'None', 'ncolors': 'None', 'colormap': 'colormap', 'data_format': 'None', 'uname': '"""GuiResult"""'}), "(subcase_id, 'NNodes/Elem', 'NNodes/Elem', 'centroid',\n nnodes_array, mask_value=-1, nlabels=None, labelsize=None, ncolors=None,\n colormap=colormap, data_format=None, uname='GuiResult')\n", (36706, 36897), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((75909, 75918), 'vtk.vtkQuad', 'vtkQuad', ([], {}), '()\n', (75916, 75918), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((79212, 79221), 'vtk.vtkQuad', 'vtkQuad', ([], {}), '()\n', (79219, 79221), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((79687, 79696), 'vtk.vtkQuad', 'vtkQuad', ([], {}), '()\n', (79694, 79696), False, 'from vtk import vtkTriangle, vtkQuad, 
vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((81869, 81878), 'vtk.vtkQuad', 'vtkQuad', ([], {}), '()\n', (81876, 81878), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((85262, 85277), 'vtk.vtkVertex', 'vtk.vtkVertex', ([], {}), '()\n', (85275, 85277), False, 'import vtk\n'), ((98673, 98712), 'numpy.searchsorted', 'np.searchsorted', (['self.element_ids', 'eids'], {}), '(self.element_ids, eids)\n', (98688, 98712), True, 'import numpy as np\n'), ((98739, 98789), 'numpy.full', 'np.full', (['self.element_ids.shape', '(-1)'], {'dtype': '"""int32"""'}), "(self.element_ids.shape, -1, dtype='int32')\n", (98746, 98789), True, 'import numpy as np\n'), ((99394, 99485), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': 'msg', 'title': 'msg', 'location': '"""centroid"""', 'scalar': 'is_type', 'mask_value': '(-1)'}), "(0, header=msg, title=msg, location='centroid', scalar=is_type,\n mask_value=-1)\n", (99403, 99485), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((100087, 100121), 'numpy.asarray', 'np.asarray', (['lines'], {'dtype': '"""float32"""'}), "(lines, dtype='float32')\n", (100097, 100121), True, 'import numpy as np\n'), ((100334, 100369), 'numpy.arange', 'np.arange', (['(0)', 'nnodes'], {'dtype': '"""int32"""'}), "(0, nnodes, dtype='int32')\n", (100343, 100369), True, 'import numpy as np\n'), ((111645, 111660), 'vtk.vtkVertex', 'vtk.vtkVertex', ([], {}), '()\n', (111658, 111660), False, 'import vtk\n'), ((111700, 111713), 'vtk.vtkLine', 'vtk.vtkLine', ([], {}), '()\n', (111711, 111713), False, 'import vtk\n'), ((111753, 111766), 'vtk.vtkTriangle', 'vtkTriangle', ([], {}), '()\n', (111764, 111766), False, 'from vtk import vtkTriangle, 
vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((111806, 111828), 'vtk.vtkQuadraticTriangle', 'vtkQuadraticTriangle', ([], {}), '()\n', (111826, 111828), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((111869, 111878), 'vtk.vtkQuad', 'vtkQuad', ([], {}), '()\n', (111876, 111878), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((111980, 111990), 'vtk.vtkTetra', 'vtkTetra', ([], {}), '()\n', (111988, 111990), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((112033, 112052), 'vtk.vtkQuadraticTetra', 'vtkQuadraticTetra', ([], {}), '()\n', (112050, 112052), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((112094, 112106), 'vtk.vtkPyramid', 'vtkPyramid', ([], {}), '()\n', (112104, 112106), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((112217, 112227), 'vtk.vtkWedge', 'vtkWedge', ([], {}), '()\n', (112225, 112227), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((112270, 112289), 'vtk.vtkQuadraticWedge', 'vtkQuadraticWedge', ([], {}), '()\n', (112287, 112289), 
False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((112330, 112345), 'vtk.vtkHexahedron', 'vtkHexahedron', ([], {}), '()\n', (112343, 112345), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((112387, 112411), 'vtk.vtkQuadraticHexahedron', 'vtkQuadraticHexahedron', ([], {}), '()\n', (112409, 112411), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((113804, 113835), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids'], {}), '(all_nids, nids)\n', (113819, 113835), True, 'import numpy as np\n'), ((113906, 113929), 'pyNastran.bdf.mesh_utils.delete_bad_elements.tri_quality', 'tri_quality', (['p1', 'p2', 'p3'], {}), '(p1, p2, p3)\n', (113917, 113929), False, 'from pyNastran.bdf.mesh_utils.delete_bad_elements import tri_quality, quad_quality, get_min_max_theta\n'), ((114084, 114110), 'numpy.cross', 'np.cross', (['(p1 - p2)', '(p1 - p3)'], {}), '(p1 - p2, p1 - p3)\n', (114092, 114110), True, 'import numpy as np\n'), ((125661, 125698), 'numpy.array_equal', 'np.array_equal', (['all_nids[inids]', 'nids'], {}), '(all_nids[inids], nids)\n', (125675, 125698), True, 'import numpy as np\n'), ((127199, 127230), 'numpy.where', 'np.where', (['(cell_types_array != 0)'], {}), '(cell_types_array != 0)\n', (127207, 127230), True, 'import numpy as np\n'), ((139001, 139102), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""NNodes/Elem"""', 'title': '"""NNodes/Elem"""', 'location': '"""centroid"""', 'scalar': 'nnodes_array'}), "(0, header='NNodes/Elem', title='NNodes/Elem', location='centroid',\n 
scalar=nnodes_array)\n", (139010, 139102), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((140926, 141023), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""Area Ratio"""', 'title': '"""Area Ratio"""', 'location': '"""centroid"""', 'scalar': 'area_ratio'}), "(0, header='Area Ratio', title='Area Ratio', location='centroid',\n scalar=area_ratio)\n", (140935, 141023), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((141331, 141431), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""Taper Ratio"""', 'title': '"""Taper Ratio"""', 'location': '"""centroid"""', 'scalar': 'taper_ratio'}), "(0, header='Taper Ratio', title='Taper Ratio', location='centroid',\n scalar=taper_ratio)\n", (141340, 141431), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((143410, 143507), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""X"""', 'title': '"""X"""', 'location': '"""node"""', 'scalar': 'xyz_cid0[:, 0]', 'data_format': '"""%g"""'}), "(0, header='X', title='X', location='node', scalar=xyz_cid0[:, 0],\n data_format='%g')\n", (143419, 143507), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((143569, 143666), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""Y"""', 'title': '"""Y"""', 'location': '"""node"""', 'scalar': 'xyz_cid0[:, 1]', 'data_format': '"""%g"""'}), "(0, header='Y', title='Y', location='node', scalar=xyz_cid0[:, 1],\n data_format='%g')\n", (143578, 143666), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((143728, 143825), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""Z"""', 'title': '"""Z"""', 'location': '"""node"""', 'scalar': 'xyz_cid0[:, 2]', 'data_format': '"""%g"""'}), "(0, 
header='Z', title='Z', location='node', scalar=xyz_cid0[:, 2],\n data_format='%g')\n", (143737, 143825), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((144402, 144515), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""Min Edge Length"""', 'title': '"""Min Edge Length"""', 'location': '"""centroid"""', 'scalar': 'min_edge_length'}), "(0, header='Min Edge Length', title='Min Edge Length', location=\n 'centroid', scalar=min_edge_length)\n", (144411, 144515), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((149718, 149792), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""NodeCd"""', 'title': '"""NodeCd"""', 'location': '"""node"""', 'scalar': 'cds'}), "(0, header='NodeCd', title='NodeCd', location='node', scalar=cds)\n", (149727, 149792), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((152252, 152279), 'numpy.nanmean', 'np.nanmean', (['min_edge_length'], {}), '(min_edge_length)\n', (152262, 152279), True, 'import numpy as np\n'), ((160073, 160086), 'vtk.vtkTriangle', 'vtkTriangle', ([], {}), '()\n', (160084, 160086), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((199270, 199283), 'vtk.vtkTriangle', 'vtkTriangle', ([], {}), '()\n', (199281, 199283), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((199742, 199765), 'pyNastran.bdf.mesh_utils.delete_bad_elements.tri_quality', 'tri_quality', (['p1', 'p2', 'p3'], {}), '(p1, p2, p3)\n', (199753, 199765), False, 'from pyNastran.bdf.mesh_utils.delete_bad_elements import tri_quality, quad_quality, get_min_max_theta\n'), ((234216, 
234236), 'numpy.isnan', 'np.isnan', (['max_thetai'], {}), '(max_thetai)\n', (234224, 234236), True, 'import numpy as np\n'), ((240868, 240898), 'numpy.setdiff1d', 'np.setdiff1d', (['i', 'mid_eids_skip'], {}), '(i, mid_eids_skip)\n', (240880, 240898), True, 'import numpy as np\n'), ((242995, 243012), 'numpy.abs', 'np.abs', (['pressures'], {}), '(pressures)\n', (243001, 243012), True, 'import numpy as np\n'), ((248809, 248919), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['subcase_id'], {'header': 'temperature_key', 'title': 'temperature_key', 'location': '"""node"""', 'scalar': 'temperatures'}), "(subcase_id, header=temperature_key, title=temperature_key,\n location='node', scalar=temperatures)\n", (248818, 248919), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((249162, 249172), 'io.StringIO', 'StringIO', ([], {}), '()\n', (249170, 249172), False, 'from io import StringIO\n'), ((249185, 249219), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 'stringio'}), '(file=stringio)\n', (249204, 249219), False, 'import traceback\n'), ((249882, 249906), 'pyNastran.op2.op2.OP2', 'OP2', ([], {'log': 'log', 'debug': '(True)'}), '(log=log, debug=True)\n', (249885, 249906), False, 'from pyNastran.op2.op2 import OP2\n'), ((257822, 257852), 'os.path.basename', 'os.path.basename', (['op2_filename'], {}), '(op2_filename)\n', (257838, 257852), False, 'import os\n'), ((271514, 271611), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""Area Ratio"""', 'title': '"""Area Ratio"""', 'location': '"""centroid"""', 'scalar': 'area_ratio'}), "(0, header='Area Ratio', title='Area Ratio', location='centroid',\n scalar=area_ratio)\n", (271523, 271611), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((271919, 272019), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""Taper Ratio"""', 'title': '"""Taper Ratio"""', 
'location': '"""centroid"""', 'scalar': 'taper_ratio'}), "(0, header='Taper Ratio', title='Taper Ratio', location='centroid',\n scalar=taper_ratio)\n", (271928, 272019), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((272872, 272976), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""Offset"""', 'title': '"""Offset"""', 'location': '"""centroid"""', 'scalar': 'offset', 'data_format': '"""%g"""'}), "(0, header='Offset', title='Offset', location='centroid', scalar=\n offset, data_format='%g')\n", (272881, 272976), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((273044, 273151), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""OffsetX"""', 'title': '"""OffsetX"""', 'location': '"""centroid"""', 'scalar': 'xoffset', 'data_format': '"""%g"""'}), "(0, header='OffsetX', title='OffsetX', location='centroid', scalar\n =xoffset, data_format='%g')\n", (273053, 273151), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((273219, 273326), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""OffsetY"""', 'title': '"""OffsetY"""', 'location': '"""centroid"""', 'scalar': 'yoffset', 'data_format': '"""%g"""'}), "(0, header='OffsetY', title='OffsetY', location='centroid', scalar\n =yoffset, data_format='%g')\n", (273228, 273326), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((273394, 273501), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""OffsetZ"""', 'title': '"""OffsetZ"""', 'location': '"""centroid"""', 'scalar': 'zoffset', 'data_format': '"""%g"""'}), "(0, header='OffsetZ', title='OffsetZ', location='centroid', scalar\n =zoffset, data_format='%g')\n", (273403, 273501), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((274131, 274169), 
'numpy.vstack', 'np.vstack', (['[xoffset, yoffset, zoffset]'], {}), '([xoffset, yoffset, zoffset])\n', (274140, 274169), True, 'import numpy as np\n'), ((275934, 276047), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""Min Edge Length"""', 'title': '"""Min Edge Length"""', 'location': '"""centroid"""', 'scalar': 'min_edge_length'}), "(0, header='Min Edge Length', title='Min Edge Length', location=\n 'centroid', scalar=min_edge_length)\n", (275943, 276047), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((278665, 278773), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""Number of Plies"""', 'title': '"""nPlies"""', 'location': '"""centroid"""', 'scalar': 'nplies', 'mask_value': '(0)'}), "(0, header='Number of Plies', title='nPlies', location='centroid',\n scalar=nplies, mask_value=0)\n", (278674, 278773), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((280688, 280794), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""MaterialID"""', 'title': '"""MaterialID"""', 'location': '"""centroid"""', 'scalar': 'midsi', 'mask_value': '(0)'}), "(0, header='MaterialID', title='MaterialID', location='centroid',\n scalar=midsi, mask_value=0)\n", (280697, 280794), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((281381, 281400), 'numpy.fmax.reduce', 'np.fmax.reduce', (['e11'], {}), '(e11)\n', (281395, 281400), True, 'import numpy as np\n'), ((281450, 281464), 'numpy.isnan', 'np.isnan', (['e112'], {}), '(e112)\n', (281458, 281464), True, 'import numpy as np\n'), ((284849, 284873), 'numpy.nanmax', 'np.nanmax', (['design_region'], {}), '(design_region)\n', (284858, 284873), True, 'import numpy as np\n'), ((285583, 285606), 'numpy.isfinite', 'np.isfinite', (['dvprel_min'], {}), '(dvprel_min)\n', (285594, 285606), True, 'import numpy as np\n'), ((285637, 
285756), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': "('DVPREL Min - %s' % key)", 'title': "('DVPREL Min - %s' % key)", 'location': '"""centroid"""', 'scalar': 'dvprel_min'}), "(0, header='DVPREL Min - %s' % key, title='DVPREL Min - %s' % key,\n location='centroid', scalar=dvprel_min)\n", (285646, 285756), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((285987, 286010), 'numpy.isfinite', 'np.isfinite', (['dvprel_max'], {}), '(dvprel_max)\n', (285998, 286010), True, 'import numpy as np\n'), ((286041, 286160), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': "('DVPREL Max - %s' % key)", 'title': "('DVPREL Max - %s' % key)", 'location': '"""centroid"""', 'scalar': 'dvprel_max'}), "(0, header='DVPREL Max - %s' % key, title='DVPREL Max - %s' % key,\n location='centroid', scalar=dvprel_max)\n", (286050, 286160), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((25055, 25079), 'pyNastran.bdf.bdf.BDF', 'BDF', ([], {'log': 'log', 'debug': '(True)'}), '(log=log, debug=True)\n', (25058, 25079), False, 'from pyNastran.bdf.bdf import BDF, CAERO1, CAERO2, CAERO3, CAERO4, CAERO5, CQUAD4, CQUAD8, CQUAD, CQUADR, CSHEAR, CTRIA3, CTRIA6, CTRIAR, CPLSTN3, CPLSTN4, CPLSTN6, CPLSTN8, CPLSTS3, CPLSTS4, CPLSTS6, CPLSTS8, CTRAX3, CTRIAX6, CTRIAX, CQUADX4, CQUADX8, CQUADX, CONM2\n'), ((25186, 25210), 'pyNastran.bdf.bdf.BDF', 'BDF', ([], {'log': 'log', 'debug': '(True)'}), '(log=log, debug=True)\n', (25189, 25210), False, 'from pyNastran.bdf.bdf import BDF, CAERO1, CAERO2, CAERO3, CAERO4, CAERO5, CQUAD4, CQUAD8, CQUAD, CQUADR, CSHEAR, CTRIA3, CTRIA6, CTRIAR, CPLSTN3, CPLSTN4, CPLSTN6, CPLSTN8, CPLSTS3, CPLSTS4, CPLSTS6, CPLSTS8, CTRAX3, CTRIAX6, CTRIAX, CQUADX4, CQUADX8, CQUADX, CONM2\n'), ((35946, 35966), 'numpy.unique', 'np.unique', (['dim_array'], {}), '(dim_array)\n', (35955, 35966), True, 'import numpy as np\n'), ((38910, 38938), 
'numpy.where', 'np.where', (['(pids_array == upid)'], {}), '(pids_array == upid)\n', (38918, 38938), True, 'import numpy as np\n'), ((48172, 48202), 'os.path.splitext', 'os.path.splitext', (['bdf_filename'], {}), '(bdf_filename)\n', (48188, 48202), False, 'import os\n'), ((58623, 58658), 'numpy.searchsorted', 'np.searchsorted', (['self.node_ids', 'nid'], {}), '(self.node_ids, nid)\n', (58638, 58658), True, 'import numpy as np\n'), ((79767, 79776), 'vtk.vtkQuad', 'vtkQuad', ([], {}), '()\n', (79774, 79776), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((82337, 82395), 'numpy.cross', 'np.cross', (['(pointsi[2] - pointsi[0])', '(pointsi[3] - pointsi[1])'], {}), '(pointsi[2] - pointsi[0], pointsi[3] - pointsi[1])\n', (82345, 82395), True, 'import numpy as np\n'), ((85916, 85931), 'vtk.vtkVertex', 'vtk.vtkVertex', ([], {}), '()\n', (85929, 85931), False, 'import vtk\n'), ((114577, 114608), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids'], {}), '(all_nids, nids)\n', (114592, 114608), True, 'import numpy as np\n'), ((114683, 114717), 'pyNastran.bdf.mesh_utils.delete_bad_elements.quad_quality', 'quad_quality', (['elem', 'p1', 'p2', 'p3', 'p4'], {}), '(elem, p1, p2, p3, p4)\n', (114695, 114717), False, 'from pyNastran.bdf.mesh_utils.delete_bad_elements import tri_quality, quad_quality, get_min_max_theta\n'), ((114909, 114935), 'numpy.cross', 'np.cross', (['(p1 - p3)', '(p2 - p4)'], {}), '(p1 - p3, p2 - p4)\n', (114917, 114935), True, 'import numpy as np\n'), ((133932, 133942), 'io.StringIO', 'StringIO', ([], {}), '()\n', (133940, 133942), False, 'from io import StringIO\n'), ((133959, 133986), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 's'}), '(file=s)\n', (133978, 133986), False, 'import traceback\n'), ((137918, 137948), 'numpy.degrees', 'np.degrees', (['min_interior_angle'], {}), 
'(min_interior_angle)\n', (137928, 137948), True, 'import numpy as np\n'), ((138109, 138139), 'numpy.degrees', 'np.degrees', (['max_interior_angle'], {}), '(max_interior_angle)\n', (138119, 138139), True, 'import numpy as np\n'), ((138301, 138325), 'numpy.degrees', 'np.degrees', (['dideal_theta'], {}), '(dideal_theta)\n', (138311, 138325), True, 'import numpy as np\n'), ((140837, 140860), 'numpy.isfinite', 'np.isfinite', (['area_ratio'], {}), '(area_ratio)\n', (140848, 140860), True, 'import numpy as np\n'), ((140866, 140887), 'numpy.nanmax', 'np.nanmax', (['area_ratio'], {}), '(area_ratio)\n', (140875, 140887), True, 'import numpy as np\n'), ((141239, 141263), 'numpy.isfinite', 'np.isfinite', (['taper_ratio'], {}), '(taper_ratio)\n', (141250, 141263), True, 'import numpy as np\n'), ((141269, 141291), 'numpy.nanmax', 'np.nanmax', (['taper_ratio'], {}), '(taper_ratio)\n', (141278, 141291), True, 'import numpy as np\n'), ((144951, 144977), 'numpy.degrees', 'np.degrees', (['max_skew_angle'], {}), '(max_skew_angle)\n', (144961, 144977), True, 'import numpy as np\n'), ((149672, 149686), 'numpy.unique', 'np.unique', (['cds'], {}), '(cds)\n', (149681, 149686), True, 'import numpy as np\n'), ((151656, 151666), 'io.StringIO', 'StringIO', ([], {}), '()\n', (151664, 151666), False, 'from io import StringIO\n'), ((151683, 151710), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 's'}), '(file=s)\n', (151702, 151710), False, 'import traceback\n'), ((201276, 201299), 'pyNastran.bdf.mesh_utils.delete_bad_elements.tri_quality', 'tri_quality', (['p1', 'p2', 'p3'], {}), '(p1, p2, p3)\n', (201287, 201299), False, 'from pyNastran.bdf.mesh_utils.delete_bad_elements import tri_quality, quad_quality, get_min_max_theta\n'), ((238733, 238754), 'numpy.where', 'np.where', (['(pids == pid)'], {}), '(pids == pid)\n', (238741, 238754), True, 'import numpy as np\n'), ((240818, 240837), 'numpy.where', 'np.where', (['(mids == 0)'], {}), '(mids == 0)\n', (240826, 240837), True, 'import 
numpy as np\n'), ((244745, 244858), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['subcase_id'], {'header': '"""Pressure"""', 'title': '"""Pressure"""', 'location': '"""centroid"""', 'scalar': 'centroidal_pressures'}), "(subcase_id, header='Pressure', title='Pressure', location=\n 'centroid', scalar=centroidal_pressures)\n", (244754, 244858), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((245230, 245258), 'numpy.linalg.norm', 'np.linalg.norm', (['fxyz'], {'axis': '(1)'}), '(fxyz, axis=1)\n', (245244, 245258), True, 'import numpy as np\n'), ((245289, 245317), 'numpy.linalg.norm', 'np.linalg.norm', (['mxyz'], {'axis': '(1)'}), '(mxyz, axis=1)\n', (245303, 245317), True, 'import numpy as np\n'), ((247187, 247205), 'numpy.linalg.norm', 'norm', (['t123'], {'axis': '(1)'}), '(t123, axis=1)\n', (247191, 247205), False, 'from numpy.linalg import norm\n'), ((247373, 247467), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['subcase_id'], {'header': '"""SPCDx"""', 'title': '"""SPCDx"""', 'location': '"""node"""', 'scalar': 'forces[:, 0]'}), "(subcase_id, header='SPCDx', title='SPCDx', location='node',\n scalar=forces[:, 0])\n", (247382, 247467), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((247540, 247634), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['subcase_id'], {'header': '"""SPCDy"""', 'title': '"""SPCDy"""', 'location': '"""node"""', 'scalar': 'forces[:, 1]'}), "(subcase_id, header='SPCDy', title='SPCDy', location='node',\n scalar=forces[:, 1])\n", (247549, 247634), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((247707, 247801), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['subcase_id'], {'header': '"""SPCDz"""', 'title': '"""SPCDz"""', 'location': '"""node"""', 'scalar': 'forces[:, 2]'}), "(subcase_id, header='SPCDz', title='SPCDz', location='node',\n scalar=forces[:, 
2])\n", (247716, 247801), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((247876, 247969), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['subcase_id'], {'header': '"""SPCD XYZ"""', 'title': '"""SPCD XYZ"""', 'location': '"""node"""', 'scalar': 'tnorm'}), "(subcase_id, header='SPCD XYZ', title='SPCD XYZ', location='node',\n scalar=tnorm)\n", (247885, 247969), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((268543, 268573), 'numpy.degrees', 'np.degrees', (['min_interior_angle'], {}), '(min_interior_angle)\n', (268553, 268573), True, 'import numpy as np\n'), ((268734, 268764), 'numpy.degrees', 'np.degrees', (['max_interior_angle'], {}), '(max_interior_angle)\n', (268744, 268764), True, 'import numpy as np\n'), ((268926, 268950), 'numpy.degrees', 'np.degrees', (['dideal_theta'], {}), '(dideal_theta)\n', (268936, 268950), True, 'import numpy as np\n'), ((271425, 271448), 'numpy.isfinite', 'np.isfinite', (['area_ratio'], {}), '(area_ratio)\n', (271436, 271448), True, 'import numpy as np\n'), ((271454, 271475), 'numpy.nanmax', 'np.nanmax', (['area_ratio'], {}), '(area_ratio)\n', (271463, 271475), True, 'import numpy as np\n'), ((271827, 271851), 'numpy.isfinite', 'np.isfinite', (['taper_ratio'], {}), '(taper_ratio)\n', (271838, 271851), True, 'import numpy as np\n'), ((271857, 271879), 'numpy.nanmax', 'np.nanmax', (['taper_ratio'], {}), '(taper_ratio)\n', (271866, 271879), True, 'import numpy as np\n'), ((280016, 280095), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': 'tword', 'title': 'tword', 'location': '"""centroid"""', 'scalar': 'thicknessi'}), "(0, header=tword, title=tword, location='centroid', scalar=thicknessi)\n", (280025, 280095), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((281588, 281686), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': 
'"""E_11"""', 'title': '"""E_11"""', 'location': '"""centroid"""', 'scalar': 'e11', 'data_format': '"""%.3e"""'}), "(0, header='E_11', title='E_11', location='centroid', scalar=e11,\n data_format='%.3e')\n", (281597, 281686), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((281753, 281851), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""E_22"""', 'title': '"""E_22"""', 'location': '"""centroid"""', 'scalar': 'e22', 'data_format': '"""%.3e"""'}), "(0, header='E_22', title='E_22', location='centroid', scalar=e22,\n data_format='%.3e')\n", (281762, 281851), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((282829, 282966), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""IsIsotropic?"""', 'title': '"""IsIsotropic?"""', 'location': '"""centroid"""', 'scalar': 'is_isotropic', 'data_format': '"""%i"""', 'mask_value': '(-1)'}), "(0, header='IsIsotropic?', title='IsIsotropic?', location=\n 'centroid', scalar=is_isotropic, data_format='%i', mask_value=-1)\n", (282838, 282966), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((24257, 24287), 'os.path.splitext', 'os.path.splitext', (['bdf_filename'], {}), '(bdf_filename)\n', (24273, 24287), False, 'import os\n'), ((47721, 47747), 'vtk.vtkUnsignedCharArray', 'vtk.vtkUnsignedCharArray', ([], {}), '()\n', (47745, 47747), False, 'import vtk\n'), ((59474, 59489), 'vtk.vtkVertex', 'vtk.vtkVertex', ([], {}), '()\n', (59487, 59489), False, 'import vtk\n'), ((64491, 64501), 'io.StringIO', 'StringIO', ([], {}), '()\n', (64499, 64501), False, 'from io import StringIO\n'), ((64522, 64549), 'traceback.print_exc', 'traceback.print_exc', ([], {'file': 's'}), '(file=s)\n', (64541, 64549), False, 'import traceback\n'), ((77458, 77467), 'vtk.vtkQuad', 'vtkQuad', ([], {}), '()\n', (77465, 77467), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, 
vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((115809, 115832), 'pyNastran.bdf.mesh_utils.delete_bad_elements.tri_quality', 'tri_quality', (['p1', 'p2', 'p3'], {}), '(p1, p2, p3)\n', (115820, 115832), False, 'from pyNastran.bdf.mesh_utils.delete_bad_elements import tri_quality, quad_quality, get_min_max_theta\n'), ((115987, 116013), 'numpy.cross', 'np.cross', (['(p1 - p2)', '(p1 - p3)'], {}), '(p1 - p2, p1 - p3)\n', (115995, 116013), True, 'import numpy as np\n'), ((129259, 129285), 'vtk.vtkUnsignedCharArray', 'vtk.vtkUnsignedCharArray', ([], {}), '()\n', (129283, 129285), False, 'import vtk\n'), ((141837, 141863), 'numpy.degrees', 'np.degrees', (['max_warp_angle'], {}), '(max_warp_angle)\n', (141847, 141863), True, 'import numpy as np\n'), ((144703, 144733), 'numpy.degrees', 'np.degrees', (['min_interior_angle'], {}), '(min_interior_angle)\n', (144713, 144733), True, 'import numpy as np\n'), ((144894, 144924), 'numpy.degrees', 'np.degrees', (['max_interior_angle'], {}), '(max_interior_angle)\n', (144904, 144924), True, 'import numpy as np\n'), ((161419, 161441), 'vtk.vtkQuadraticTriangle', 'vtkQuadraticTriangle', ([], {}), '()\n', (161439, 161441), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((161701, 161714), 'vtk.vtkTriangle', 'vtkTriangle', ([], {}), '()\n', (161712, 161714), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((200778, 200800), 'vtk.vtkQuadraticTriangle', 'vtkQuadraticTriangle', ([], {}), '()\n', (200798, 200800), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, 
vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((201060, 201073), 'vtk.vtkTriangle', 'vtkTriangle', ([], {}), '()\n', (201071, 201073), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((202944, 202967), 'pyNastran.bdf.mesh_utils.delete_bad_elements.tri_quality', 'tri_quality', (['p1', 'p2', 'p3'], {}), '(p1, p2, p3)\n', (202955, 202967), False, 'from pyNastran.bdf.mesh_utils.delete_bad_elements import tri_quality, quad_quality, get_min_max_theta\n'), ((238890, 238911), 'numpy.where', 'np.where', (['(pids == pid)'], {}), '(pids == pid)\n', (238898, 238911), True, 'import numpy as np\n'), ((244674, 244702), 'numpy.abs', 'np.abs', (['centroidal_pressures'], {}), '(centroidal_pressures)\n', (244680, 244702), True, 'import numpy as np\n'), ((245651, 245855), 'pyNastran.gui.gui_objects.displacements.ForceTableResults', 'ForceTableResults', (['subcase_id', 'titles', 'headers', 'fxyz', 'fscalar', 'scales'], {'data_formats': 'None', 'nlabels': 'None', 'labelsize': 'None', 'ncolors': 'None', 'colormap': 'colormap', 'set_max_min': '(False)', 'uname': '"""NastranGeometry"""'}), "(subcase_id, titles, headers, fxyz, fscalar, scales,\n data_formats=None, nlabels=None, labelsize=None, ncolors=None, colormap\n =colormap, set_max_min=False, uname='NastranGeometry')\n", (245668, 245855), False, 'from pyNastran.gui.gui_objects.displacements import ForceTableResults, ElementalTableResults\n'), ((246522, 246726), 'pyNastran.gui.gui_objects.displacements.ForceTableResults', 'ForceTableResults', (['subcase_id', 'titles', 'headers', 'mxyz', 'mscalar', 'scales'], {'data_formats': 'None', 'nlabels': 'None', 'labelsize': 'None', 'ncolors': 'None', 'colormap': 'colormap', 'set_max_min': '(False)', 'uname': '"""NastranGeometry"""'}), "(subcase_id, titles, headers, mxyz, mscalar, scales,\n data_formats=None, nlabels=None, 
labelsize=None, ncolors=None, colormap\n =colormap, set_max_min=False, uname='NastranGeometry')\n", (246539, 246726), False, 'from pyNastran.gui.gui_objects.displacements import ForceTableResults, ElementalTableResults\n'), ((249733, 249767), 'os.path.splitext', 'os.path.splitext', (['results_filename'], {}), '(results_filename)\n', (249749, 249767), False, 'import os\n'), ((250731, 250755), 'pyNastran.op2.op2.OP2', 'OP2', ([], {'log': 'log', 'debug': '(True)'}), '(log=log, debug=True)\n', (250734, 250755), False, 'from pyNastran.op2.op2 import OP2\n'), ((272425, 272451), 'numpy.degrees', 'np.degrees', (['max_warp_angle'], {}), '(max_warp_angle)\n', (272435, 272451), True, 'import numpy as np\n'), ((276235, 276265), 'numpy.degrees', 'np.degrees', (['min_interior_angle'], {}), '(min_interior_angle)\n', (276245, 276265), True, 'import numpy as np\n'), ((276426, 276456), 'numpy.degrees', 'np.degrees', (['max_interior_angle'], {}), '(max_interior_angle)\n', (276436, 276456), True, 'import numpy as np\n'), ((282420, 282518), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""E_33"""', 'title': '"""E_33"""', 'location': '"""centroid"""', 'scalar': 'e33', 'data_format': '"""%.3e"""'}), "(0, header='E_33', title='E_33', location='centroid', scalar=e33,\n data_format='%.3e')\n", (282429, 282518), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((283309, 283323), 'numpy.nanmax', 'np.nanmax', (['e11'], {}), '(e11)\n', (283318, 283323), True, 'import numpy as np\n'), ((283354, 283446), 'pyNastran.gui.gui_objects.gui_result.GuiResult', 'GuiResult', (['(0)'], {'header': '"""E"""', 'title': '"""E"""', 'location': '"""centroid"""', 'scalar': 'e11', 'data_format': '"""%.3e"""'}), "(0, header='E', title='E', location='centroid', scalar=e11,\n data_format='%.3e')\n", (283363, 283446), False, 'from pyNastran.gui.gui_objects.gui_result import GuiResult, NormalResult\n'), ((59029, 59063), 'numpy.searchsorted', 
'np.searchsorted', (['self.node_ids', 'n1'], {}), '(self.node_ids, n1)\n', (59044, 59063), True, 'import numpy as np\n'), ((59229, 59263), 'numpy.searchsorted', 'np.searchsorted', (['self.node_ids', 'n2'], {}), '(self.node_ids, n2)\n', (59244, 59263), True, 'import numpy as np\n'), ((75788, 75805), 'numpy.array', 'np.array', (['cpoints'], {}), '(cpoints)\n', (75796, 75805), True, 'import numpy as np\n'), ((75854, 75871), 'numpy.array', 'np.array', (['cpoints'], {}), '(cpoints)\n', (75862, 75871), True, 'import numpy as np\n'), ((78537, 78559), 'numpy.vstack', 'np.vstack', (['max_cpoints'], {}), '(max_cpoints)\n', (78546, 78559), True, 'import numpy as np\n'), ((78617, 78639), 'numpy.vstack', 'np.vstack', (['min_cpoints'], {}), '(min_cpoints)\n', (78626, 78639), True, 'import numpy as np\n'), ((90731, 90785), 'pyNastran.bdf.mesh_utils.mpc_dependency.get_mpc_node_ids', 'get_mpc_node_ids', (['model', 'mpc_id'], {'stop_on_failure': '(False)'}), '(model, mpc_id, stop_on_failure=False)\n', (90747, 90785), False, 'from pyNastran.bdf.mesh_utils.mpc_dependency import get_mpc_node_ids\n'), ((115394, 115429), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids[:3]'], {}), '(all_nids, nids[:3])\n', (115409, 115429), True, 'import numpy as np\n'), ((115646, 115677), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids'], {}), '(all_nids, nids)\n', (115661, 115677), True, 'import numpy as np\n'), ((116845, 116879), 'pyNastran.bdf.mesh_utils.delete_bad_elements.quad_quality', 'quad_quality', (['elem', 'p1', 'p2', 'p3', 'p4'], {}), '(elem, p1, p2, p3, p4)\n', (116857, 116879), False, 'from pyNastran.bdf.mesh_utils.delete_bad_elements import tri_quality, quad_quality, get_min_max_theta\n'), ((117071, 117097), 'numpy.cross', 'np.cross', (['(p1 - p3)', '(p2 - p4)'], {}), '(p1 - p3, p2 - p4)\n', (117079, 117097), True, 'import numpy as np\n'), ((162858, 162880), 'vtk.vtkQuadraticTriangle', 'vtkQuadraticTriangle', ([], {}), '()\n', (162878, 162880), False, 'from vtk 
import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((163140, 163153), 'vtk.vtkTriangle', 'vtkTriangle', ([], {}), '()\n', (163151, 163153), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((164785, 164794), 'vtk.vtkQuad', 'vtkQuad', ([], {}), '()\n', (164792, 164794), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((202388, 202410), 'vtk.vtkQuadraticTriangle', 'vtkQuadraticTriangle', ([], {}), '()\n', (202408, 202410), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((202670, 202683), 'vtk.vtkTriangle', 'vtkTriangle', ([], {}), '()\n', (202681, 202683), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((204471, 204508), 'pyNastran.bdf.mesh_utils.delete_bad_elements.quad_quality', 'quad_quality', (['element', 'p1', 'p2', 'p3', 'p4'], {}), '(element, p1, p2, p3, p4)\n', (204483, 204508), False, 'from pyNastran.bdf.mesh_utils.delete_bad_elements import tri_quality, quad_quality, get_min_max_theta\n'), ((204698, 204707), 'vtk.vtkQuad', 'vtkQuad', ([], {}), '()\n', (204705, 204707), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((239098, 239119), 'numpy.where', 'np.where', (['(pids == pid)'], {}), 
'(pids == pid)\n', (239106, 239119), True, 'import numpy as np\n'), ((281075, 281099), 'numpy.array_equal', 'np.array_equal', (['e11', 'e22'], {}), '(e11, e22)\n', (281089, 281099), True, 'import numpy as np\n'), ((281104, 281128), 'numpy.array_equal', 'np.array_equal', (['e11', 'e33'], {}), '(e11, e33)\n', (281118, 281128), True, 'import numpy as np\n'), ((281202, 281226), 'numpy.array_equal', 'np.array_equal', (['e11', 'e22'], {}), '(e11, e22)\n', (281216, 281226), True, 'import numpy as np\n'), ((283289, 283303), 'numpy.nanmax', 'np.nanmax', (['e11'], {}), '(e11)\n', (283298, 283303), True, 'import numpy as np\n'), ((28030, 28052), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (28050, 28052), False, 'import traceback\n'), ((116444, 116479), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids[:4]'], {}), '(all_nids, nids[:4])\n', (116459, 116479), True, 'import numpy as np\n'), ((116700, 116731), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids'], {}), '(all_nids, nids)\n', (116715, 116731), True, 'import numpy as np\n'), ((117519, 117550), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids'], {}), '(all_nids, nids)\n', (117534, 117550), True, 'import numpy as np\n'), ((117625, 117659), 'pyNastran.bdf.mesh_utils.delete_bad_elements.quad_quality', 'quad_quality', (['elem', 'p1', 'p2', 'p3', 'p4'], {}), '(elem, p1, p2, p3, p4)\n', (117637, 117659), False, 'from pyNastran.bdf.mesh_utils.delete_bad_elements import tri_quality, quad_quality, get_min_max_theta\n'), ((117851, 117877), 'numpy.cross', 'np.cross', (['(p1 - p3)', '(p2 - p4)'], {}), '(p1 - p3, p2 - p4)\n', (117859, 117877), True, 'import numpy as np\n'), ((205745, 205782), 'pyNastran.bdf.mesh_utils.delete_bad_elements.quad_quality', 'quad_quality', (['element', 'p1', 'p2', 'p3', 'p4'], {}), '(element, p1, p2, p3, p4)\n', (205757, 205782), False, 'from pyNastran.bdf.mesh_utils.delete_bad_elements import tri_quality, quad_quality, get_min_max_theta\n'), 
((239722, 239743), 'numpy.where', 'np.where', (['(pids == pid)'], {}), '(pids == pid)\n', (239730, 239743), True, 'import numpy as np\n'), ((118308, 118339), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids'], {}), '(all_nids, nids)\n', (118323, 118339), True, 'import numpy as np\n'), ((118414, 118471), 'pyNastran.bdf.mesh_utils.delete_bad_elements.get_min_max_theta', 'get_min_max_theta', (['_ctetra_faces', 'nids', 'nid_map', 'xyz_cid0'], {}), '(_ctetra_faces, nids, nid_map, xyz_cid0)\n', (118431, 118471), False, 'from pyNastran.bdf.mesh_utils.delete_bad_elements import tri_quality, quad_quality, get_min_max_theta\n'), ((165891, 165909), 'vtk.vtkQuadraticQuad', 'vtkQuadraticQuad', ([], {}), '()\n', (165907, 165909), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((166239, 166248), 'vtk.vtkQuad', 'vtkQuad', ([], {}), '()\n', (166246, 166248), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((206016, 206034), 'vtk.vtkQuadraticQuad', 'vtkQuadraticQuad', ([], {}), '()\n', (206032, 206034), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((206364, 206373), 'vtk.vtkQuad', 'vtkQuad', ([], {}), '()\n', (206371, 206373), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((207408, 207445), 'pyNastran.bdf.mesh_utils.delete_bad_elements.quad_quality', 'quad_quality', (['element', 'p1', 'p2', 'p3', 'p4'], {}), '(element, p1, p2, p3, p4)\n', (207420, 207445), False, 'from 
pyNastran.bdf.mesh_utils.delete_bad_elements import tri_quality, quad_quality, get_min_max_theta\n'), ((240480, 240501), 'numpy.where', 'np.where', (['(pids == pid)'], {}), '(pids == pid)\n', (240488, 240501), True, 'import numpy as np\n'), ((118893, 118924), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids'], {}), '(all_nids, nids)\n', (118908, 118924), True, 'import numpy as np\n'), ((118999, 119055), 'pyNastran.bdf.mesh_utils.delete_bad_elements.get_min_max_theta', 'get_min_max_theta', (['_chexa_faces', 'nids', 'nid_map', 'xyz_cid0'], {}), '(_chexa_faces, nids, nid_map, xyz_cid0)\n', (119016, 119055), False, 'from pyNastran.bdf.mesh_utils.delete_bad_elements import tri_quality, quad_quality, get_min_max_theta\n'), ((167333, 167357), 'vtk.vtkBiQuadraticQuad', 'vtk.vtkBiQuadraticQuad', ([], {}), '()\n', (167355, 167357), False, 'import vtk\n'), ((167757, 167766), 'vtk.vtkQuad', 'vtkQuad', ([], {}), '()\n', (167764, 167766), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((168106, 168116), 'vtk.vtkTetra', 'vtkTetra', ([], {}), '()\n', (168114, 168116), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((207679, 207703), 'vtk.vtkBiQuadraticQuad', 'vtk.vtkBiQuadraticQuad', ([], {}), '()\n', (207701, 207703), False, 'import vtk\n'), ((208103, 208112), 'vtk.vtkQuad', 'vtkQuad', ([], {}), '()\n', (208110, 208112), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((208452, 208462), 'vtk.vtkTetra', 'vtkTetra', ([], {}), '()\n', (208460, 208462), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, 
vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((209156, 209221), 'pyNastran.bdf.mesh_utils.delete_bad_elements.get_min_max_theta', 'get_min_max_theta', (['_ctetra_faces', 'node_ids[:4]', 'nid_map', 'xyz_cid0'], {}), '(_ctetra_faces, node_ids[:4], nid_map, xyz_cid0)\n', (209173, 209221), False, 'from pyNastran.bdf.mesh_utils.delete_bad_elements import tri_quality, quad_quality, get_min_max_theta\n'), ((119482, 119513), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids'], {}), '(all_nids, nids)\n', (119497, 119513), True, 'import numpy as np\n'), ((119588, 119645), 'pyNastran.bdf.mesh_utils.delete_bad_elements.get_min_max_theta', 'get_min_max_theta', (['_cpenta_faces', 'nids', 'nid_map', 'xyz_cid0'], {}), '(_cpenta_faces, nids, nid_map, xyz_cid0)\n', (119605, 119645), False, 'from pyNastran.bdf.mesh_utils.delete_bad_elements import tri_quality, quad_quality, get_min_max_theta\n'), ((210538, 210603), 'pyNastran.bdf.mesh_utils.delete_bad_elements.get_min_max_theta', 'get_min_max_theta', (['_ctetra_faces', 'node_ids[:4]', 'nid_map', 'xyz_cid0'], {}), '(_ctetra_faces, node_ids[:4], nid_map, xyz_cid0)\n', (210555, 210603), False, 'from pyNastran.bdf.mesh_utils.delete_bad_elements import tri_quality, quad_quality, get_min_max_theta\n'), ((120104, 120135), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids'], {}), '(all_nids, nids)\n', (120119, 120135), True, 'import numpy as np\n'), ((120210, 120267), 'pyNastran.bdf.mesh_utils.delete_bad_elements.get_min_max_theta', 'get_min_max_theta', (['_cpyram_faces', 'nids', 'nid_map', 'xyz_cid0'], {}), '(_cpyram_faces, nids, nid_map, xyz_cid0)\n', (120227, 120267), False, 'from pyNastran.bdf.mesh_utils.delete_bad_elements import tri_quality, quad_quality, get_min_max_theta\n'), ((169117, 169136), 'vtk.vtkQuadraticTetra', 'vtkQuadraticTetra', ([], {}), '()\n', (169134, 169136), False, 'from vtk import vtkTriangle, vtkQuad, 
vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((169606, 169616), 'vtk.vtkTetra', 'vtkTetra', ([], {}), '()\n', (169614, 169616), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((170028, 170038), 'vtk.vtkWedge', 'vtkWedge', ([], {}), '()\n', (170036, 170038), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((209624, 209643), 'vtk.vtkQuadraticTetra', 'vtkQuadraticTetra', ([], {}), '()\n', (209641, 209643), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((210113, 210123), 'vtk.vtkTetra', 'vtkTetra', ([], {}), '()\n', (210121, 210123), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((210696, 210706), 'vtk.vtkWedge', 'vtkWedge', ([], {}), '()\n', (210704, 210706), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((211457, 211522), 'pyNastran.bdf.mesh_utils.delete_bad_elements.get_min_max_theta', 'get_min_max_theta', (['_cpenta_faces', 'node_ids[:6]', 'nid_map', 'xyz_cid0'], {}), '(_cpenta_faces, node_ids[:6], nid_map, xyz_cid0)\n', (211474, 211522), False, 'from pyNastran.bdf.mesh_utils.delete_bad_elements import tri_quality, quad_quality, get_min_max_theta\n'), ((121166, 121197), 'numpy.searchsorted', 'np.searchsorted', 
(['all_nids', 'nids'], {}), '(all_nids, nids)\n', (121181, 121197), True, 'import numpy as np\n'), ((213191, 213256), 'pyNastran.bdf.mesh_utils.delete_bad_elements.get_min_max_theta', 'get_min_max_theta', (['_cpenta_faces', 'node_ids[:6]', 'nid_map', 'xyz_cid0'], {}), '(_cpenta_faces, node_ids[:6], nid_map, xyz_cid0)\n', (213208, 213256), False, 'from pyNastran.bdf.mesh_utils.delete_bad_elements import tri_quality, quad_quality, get_min_max_theta\n'), ((121716, 121747), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids'], {}), '(all_nids, nids)\n', (121731, 121747), True, 'import numpy as np\n'), ((171096, 171115), 'vtk.vtkQuadraticWedge', 'vtkQuadraticWedge', ([], {}), '()\n', (171113, 171115), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((171805, 171815), 'vtk.vtkWedge', 'vtkWedge', ([], {}), '()\n', (171813, 171815), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((172588, 172603), 'vtk.vtkHexahedron', 'vtkHexahedron', ([], {}), '()\n', (172601, 172603), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((211925, 211944), 'vtk.vtkQuadraticWedge', 'vtkQuadraticWedge', ([], {}), '()\n', (211942, 211944), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((212634, 212644), 'vtk.vtkWedge', 'vtkWedge', ([], {}), '()\n', (212642, 212644), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, 
vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((213578, 213593), 'vtk.vtkHexahedron', 'vtkHexahedron', ([], {}), '()\n', (213591, 213593), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((214256, 214320), 'pyNastran.bdf.mesh_utils.delete_bad_elements.get_min_max_theta', 'get_min_max_theta', (['_chexa_faces', 'node_ids[:8]', 'nid_map', 'xyz_cid0'], {}), '(_chexa_faces, node_ids[:8], nid_map, xyz_cid0)\n', (214273, 214320), False, 'from pyNastran.bdf.mesh_utils.delete_bad_elements import tri_quality, quad_quality, get_min_max_theta\n'), ((122061, 122092), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids'], {}), '(all_nids, nids)\n', (122076, 122092), True, 'import numpy as np\n'), ((122172, 122185), 'numpy.linalg.norm', 'norm', (['(p2 - p1)'], {}), '(p2 - p1)\n', (122176, 122185), False, 'from numpy.linalg import norm\n'), ((216414, 216478), 'pyNastran.bdf.mesh_utils.delete_bad_elements.get_min_max_theta', 'get_min_max_theta', (['_chexa_faces', 'node_ids[:8]', 'nid_map', 'xyz_cid0'], {}), '(_chexa_faces, node_ids[:8], nid_map, xyz_cid0)\n', (216431, 216478), False, 'from pyNastran.bdf.mesh_utils.delete_bad_elements import tri_quality, quad_quality, get_min_max_theta\n'), ((122499, 122530), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids'], {}), '(all_nids, nids)\n', (122514, 122530), True, 'import numpy as np\n'), ((122610, 122623), 'numpy.linalg.norm', 'norm', (['(p2 - p1)'], {}), '(p2 - p1)\n', (122614, 122623), False, 'from numpy.linalg import norm\n'), ((173531, 173555), 'vtk.vtkQuadraticHexahedron', 'vtkQuadraticHexahedron', ([], {}), '()\n', (173553, 173555), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), 
((174518, 174533), 'vtk.vtkHexahedron', 'vtkHexahedron', ([], {}), '()\n', (174531, 174533), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((175481, 175493), 'vtk.vtkPyramid', 'vtkPyramid', ([], {}), '()\n', (175491, 175493), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((214681, 214705), 'vtk.vtkQuadraticHexahedron', 'vtkQuadraticHexahedron', ([], {}), '()\n', (214703, 214705), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((215668, 215683), 'vtk.vtkHexahedron', 'vtkHexahedron', ([], {}), '()\n', (215681, 215683), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((216791, 216803), 'vtk.vtkPyramid', 'vtkPyramid', ([], {}), '()\n', (216801, 216803), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((217313, 217378), 'pyNastran.bdf.mesh_utils.delete_bad_elements.get_min_max_theta', 'get_min_max_theta', (['_cpyram_faces', 'node_ids[:5]', 'nid_map', 'xyz_cid0'], {}), '(_cpyram_faces, node_ids[:5], nid_map, xyz_cid0)\n', (217330, 217378), False, 'from pyNastran.bdf.mesh_utils.delete_bad_elements import tri_quality, quad_quality, get_min_max_theta\n'), ((122872, 122903), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids'], {}), '(all_nids, nids)\n', (122887, 122903), True, 'import numpy as np\n'), ((122983, 
122996), 'numpy.linalg.norm', 'norm', (['(p2 - p1)'], {}), '(p2 - p1)\n', (122987, 122996), False, 'from numpy.linalg import norm\n'), ((176854, 176866), 'vtk.vtkPyramid', 'vtkPyramid', ([], {}), '()\n', (176864, 176866), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((218325, 218337), 'vtk.vtkPyramid', 'vtkPyramid', ([], {}), '()\n', (218335, 218337), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((218923, 218988), 'pyNastran.bdf.mesh_utils.delete_bad_elements.get_min_max_theta', 'get_min_max_theta', (['_cpyram_faces', 'node_ids[:5]', 'nid_map', 'xyz_cid0'], {}), '(_cpyram_faces, node_ids[:5], nid_map, xyz_cid0)\n', (218940, 218988), False, 'from pyNastran.bdf.mesh_utils.delete_bad_elements import tri_quality, quad_quality, get_min_max_theta\n'), ((124058, 124089), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids'], {}), '(all_nids, nids)\n', (124073, 124089), True, 'import numpy as np\n'), ((125037, 125068), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids'], {}), '(all_nids, nids)\n', (125052, 125068), True, 'import numpy as np\n'), ((125148, 125161), 'numpy.linalg.norm', 'norm', (['(p2 - p1)'], {}), '(p2 - p1)\n', (125152, 125161), False, 'from numpy.linalg import norm\n'), ((179070, 179085), 'vtk.vtkVertex', 'vtk.vtkVertex', ([], {}), '()\n', (179083, 179085), False, 'import vtk\n'), ((179705, 179718), 'vtk.vtkLine', 'vtk.vtkLine', ([], {}), '()\n', (179716, 179718), False, 'import vtk\n'), ((180769, 180805), 'numpy.searchsorted', 'np.searchsorted', (['nids', 'element.nodes'], {}), '(nids, element.nodes)\n', (180784, 180805), True, 'import numpy as np\n'), ((180956, 180969), 'vtk.vtkLine', 'vtk.vtkLine', ([], {}), '()\n', (180967, 
180969), False, 'import vtk\n'), ((220662, 220677), 'vtk.vtkVertex', 'vtk.vtkVertex', ([], {}), '()\n', (220675, 220677), False, 'import vtk\n'), ((221217, 221230), 'vtk.vtkLine', 'vtk.vtkLine', ([], {}), '()\n', (221228, 221230), False, 'import vtk\n'), ((222919, 222936), 'numpy.linalg.norm', 'norm', (['(xyz2 - xyz1)'], {}), '(xyz2 - xyz1)\n', (222923, 222936), False, 'from numpy.linalg import norm\n'), ((223007, 223020), 'vtk.vtkLine', 'vtk.vtkLine', ([], {}), '()\n', (223018, 223020), False, 'import vtk\n'), ((124824, 124839), 'numpy.hstack', 'np.hstack', (['nids'], {}), '(nids)\n', (124833, 124839), True, 'import numpy as np\n'), ((181726, 181762), 'numpy.searchsorted', 'np.searchsorted', (['nids', 'element.nodes'], {}), '(nids, element.nodes)\n', (181741, 181762), True, 'import numpy as np\n'), ((222439, 222475), 'numpy.searchsorted', 'np.searchsorted', (['nids', 'element.nodes'], {}), '(nids, element.nodes)\n', (222454, 222475), True, 'import numpy as np\n'), ((223786, 223822), 'numpy.searchsorted', 'np.searchsorted', (['nids', 'element.nodes'], {}), '(nids, element.nodes)\n', (223801, 223822), True, 'import numpy as np\n'), ((224449, 224471), 'vtk.vtkQuadraticEdge', 'vtk.vtkQuadraticEdge', ([], {}), '()\n', (224469, 224471), False, 'import vtk\n'), ((182284, 182306), 'vtk.vtkQuadraticEdge', 'vtk.vtkQuadraticEdge', ([], {}), '()\n', (182304, 182306), False, 'import vtk\n'), ((182417, 182430), 'vtk.vtkLine', 'vtk.vtkLine', ([], {}), '()\n', (182428, 182430), False, 'import vtk\n'), ((225398, 225435), 'pyNastran.bdf.mesh_utils.delete_bad_elements.quad_quality', 'quad_quality', (['element', 'p1', 'p2', 'p3', 'p4'], {}), '(element, p1, p2, p3, p4)\n', (225410, 225435), False, 'from pyNastran.bdf.mesh_utils.delete_bad_elements import tri_quality, quad_quality, get_min_max_theta\n'), ((183385, 183394), 'vtk.vtkQuad', 'vtkQuad', ([], {}), '()\n', (183392, 183394), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, 
vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((183452, 183470), 'vtk.vtkQuadraticQuad', 'vtkQuadraticQuad', ([], {}), '()\n', (183468, 183470), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((186529, 186542), 'vtk.vtkTriangle', 'vtkTriangle', ([], {}), '()\n', (186540, 186542), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((187812, 187825), 'vtk.vtkLine', 'vtk.vtkLine', ([], {}), '()\n', (187823, 187825), False, 'import vtk\n'), ((225716, 225725), 'vtk.vtkQuad', 'vtkQuad', ([], {}), '()\n', (225723, 225725), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((225783, 225801), 'vtk.vtkQuadraticQuad', 'vtkQuadraticQuad', ([], {}), '()\n', (225799, 225801), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((227144, 227167), 'pyNastran.bdf.mesh_utils.delete_bad_elements.tri_quality', 'tri_quality', (['p1', 'p2', 'p3'], {}), '(p1, p2, p3)\n', (227155, 227167), False, 'from pyNastran.bdf.mesh_utils.delete_bad_elements import tri_quality, quad_quality, get_min_max_theta\n'), ((228435, 228448), 'vtk.vtkLine', 'vtk.vtkLine', ([], {}), '()\n', (228446, 228448), False, 'import vtk\n'), ((184275, 184288), 'vtk.vtkTriangle', 'vtkTriangle', ([], {}), '()\n', (184286, 184288), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, 
vtkQuadraticHexahedron, vtkPyramid\n'), ((184346, 184368), 'vtk.vtkQuadraticTriangle', 'vtkQuadraticTriangle', ([], {}), '()\n', (184366, 184368), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((187014, 187023), 'vtk.vtkQuad', 'vtkQuad', ([], {}), '()\n', (187021, 187023), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((226606, 226619), 'vtk.vtkTriangle', 'vtkTriangle', ([], {}), '()\n', (226617, 226619), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((226677, 226699), 'vtk.vtkQuadraticTriangle', 'vtkQuadraticTriangle', ([], {}), '()\n', (226697, 226699), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((230686, 230709), 'pyNastran.bdf.mesh_utils.delete_bad_elements.tri_quality', 'tri_quality', (['p1', 'p2', 'p3'], {}), '(p1, p2, p3)\n', (230697, 230709), False, 'from pyNastran.bdf.mesh_utils.delete_bad_elements import tri_quality, quad_quality, get_min_max_theta\n'), ((230874, 230887), 'vtk.vtkTriangle', 'vtkTriangle', ([], {}), '()\n', (230885, 230887), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((232683, 232696), 'vtk.vtkLine', 'vtk.vtkLine', ([], {}), '()\n', (232694, 232696), False, 'import vtk\n'), ((231353, 231390), 'pyNastran.bdf.mesh_utils.delete_bad_elements.quad_quality', 'quad_quality', (['element', 
'p1', 'p2', 'p3', 'p4'], {}), '(element, p1, p2, p3, p4)\n', (231365, 231390), False, 'from pyNastran.bdf.mesh_utils.delete_bad_elements import tri_quality, quad_quality, get_min_max_theta\n'), ((231592, 231601), 'vtk.vtkQuad', 'vtkQuad', ([], {}), '()\n', (231599, 231601), False, 'from vtk import vtkTriangle, vtkQuad, vtkTetra, vtkWedge, vtkHexahedron, vtkQuadraticTriangle, vtkQuadraticQuad, vtkQuadraticTetra, vtkQuadraticWedge, vtkQuadraticHexahedron, vtkPyramid\n'), ((232565, 232586), 'numpy.hstack', 'np.hstack', (['genel_nids'], {}), '(genel_nids)\n', (232574, 232586), True, 'import numpy as np\n')] |
from .cantorProject.network import Network
from .cantorProject.tfp_trainer import tfp_Trainer, set_weights
from .cantorProject.sci_trainer import sci_Trainer
from .utils import plot
import tensorflow as tf
import numpy as np
import math
from tensorflow.keras.layers import Lambda
class GradientLayer(tf.keras.layers.Layer):
    """
    Custom layer that evaluates both networks at (z, t) and returns the
    quantities and first derivatives needed by the PDE residual.
    """

    def __init__(self, R_model, q_model, **kwargs):
        """
        Args:
            R_model: keras network model to simulate R
            q_model: keras network model to simulate q
        """
        self.R_model = R_model
        self.q_model = q_model
        super().__init__(**kwargs)

    def call(self, tx):
        """
        Evaluate both networks on the concatenated (z, t) input and compute
        first derivatives with per-sample jacobians.

        Args:
            tx: concatenated (z, t) input tensor; column 0 is z, column 1 is t.

        Returns:
            A     : cross-sectional area, pi * R**2
            q     : output of q_model
            R     : output of R_model
            dA_dt : first derivative of A w.r.t. t
            dq_dt : first derivative of q w.r.t. t
            dq_dz : first derivative of q w.r.t. z
        """
        # Area (from the R network) and its time derivative.
        with tf.GradientTape() as area_tape:
            area_tape.watch(tx)
            R = self.R_model(tx)
            A = np.pi * (R**2)
        dA_dt = area_tape.batch_jacobian(A, tx)[..., 1]

        # Flow rate (from the q network) and its space/time derivatives.
        with tf.GradientTape() as flow_tape:
            flow_tape.watch(tx)
            q = self.q_model(tx)
        dq_dtx = flow_tape.batch_jacobian(q, tx)
        dq_dz = dq_dtx[..., 0]
        dq_dt = dq_dtx[..., 1]

        return A, q, R, dA_dt, dq_dt, dq_dz
class PINN:
    """
    Physics-informed model coupling the R (radius) and q (flow) networks
    through a PDE residual plus inflow/outflow boundary terms.
    """
    def __init__(self,R_network,q_network):
        """
        Args :
            R_network : Keras network model to compute R
            q_network : Keras network model to compute q
        """
        self.R_network = R_network
        self.q_network = q_network
        # Shared layer that evaluates both networks and their derivatives.
        self.grad = GradientLayer(self.R_network,self.q_network)
    def build(self,delta_b,E,h,elasticity_func,R1,R2,CT,Ru,Rd,L,Reynolds_no,q_0):
        """
        Builds the actual model.

        Args:
            delta_b         : parameter appearing in the friction term t1
            E               : elastic modulus (forwarded to p_class)
            h               : wall thickness (forwarded to p_class)
            elasticity_func : elasticity callable
            R1, R2, CT      : parameters of the outflow boundary residual
            Ru, Rd, L       : geometry parameters forwarded to the helper layers
            Reynolds_no     : Reynolds number used in the friction term t1
            q_0             : not referenced in this method's body

        Returns:
            tf.keras.Model mapping [z, t, z_inflow, t_inflow, z_outflow, t_outflow]
            to [u_eqn, q_bndry_inflow, u_bndry_outflow].

        NOTE(review): the `elasticity_func` parameter is never used directly
        here; the helper layers call the module-level `elasticity_func`
        instead — confirm this shadowing is intentional.
        """
        # Interior (collocation) inputs.
        z=tf.keras.layers.Input(shape=(1,))
        t=tf.keras.layers.Input(shape=(1,))
        #print("PINN")
        #print(Ru)
        #print(Rd)
        concat_layer = tf.keras.layers.Concatenate()([z,t])
        A,q,R,dA_dt,dq_dt,dq_dz=self.grad(concat_layer)
        A_0,dl_dz = find_derivatives_l(self.R_network,self.q_network,Ru,Rd,L)((z,t))
        """
        We divide the partial differential equation into two parts, p1 and p2
        p1 -> du/dt + dq/dt = 0
        p2 -> dq/dz + dl/dt = S1
        """
        # Squared residual of the first (conservation) equation.
        p1 = (dA_dt + dq_dz)**2
        r0_grad= find_derivatives_r0(self.R_network,Ru,Rd,L)
        dr0_dz,r0 = r0_grad(z)
        df_dr0 = find_derivatives_f0()(r0)
        # t1: friction term of the source S1 (ar = (R, q, A)).
        t1 = Lambda(lambda ar: -(2*math.pi*ar[0]*ar[1])/(delta_b*Reynolds_no*ar[2]))((R,q,A))
        #print(df_dr0)
        #print("T2")
        #t2 = Lambda(lambda ar: math.sqrt(math.pi) * elasticity_func(relaxed_radius_func(ar[0],ar[3],int(ar[4]),int(ar[5]))) + tf.math.sqrt(ar[1]) * ar[2])((z,A_0,df_dr0,float(Ru),Rd,L))
        t2 = t2_class(Ru,Rd,L)((z,A_0,df_dr0))
        # S1 source term (ar = (A, df_dr0, dr0_dz, t1, t2)).
        S1 = Lambda(lambda ar: ar[3] + (2*tf.math.sqrt(ar[0])*(ar[4])-ar[0]*ar[1])*ar[2])((A,df_dr0,dr0_dz,t1,t2))
        # Squared residual of the second (momentum) equation.
        p2 = Lambda(lambda ar: tf.math.pow(ar[0] + ar[1] - ar[2],2))((dq_dt,dl_dz,S1))
        u_eqn = p1 + p2
        #For the inflow condition
        z_inflow = tf.keras.layers.Input(shape=(1,))
        t_inflow = tf.keras.layers.Input(shape = (1,))
        concat_inflow = tf.keras.layers.Concatenate()([z_inflow,t_inflow])
        q_bndry_inflow = self.q_network(concat_inflow)
        #For the outflow condition
        z_outflow = tf.keras.layers.Input(shape=(1,))
        t_outflow = tf.keras.layers.Input(shape = (1,))
        p_obj = p_class(self.R_network,self.q_network,Ru,Rd,L,E,h)
        dp_bo_dt,p,dq_bo_dt,q_bo = p_obj((z_outflow,t_outflow))
        # Outflow residual built from R1, R2, CT — looks like an RCR
        # (Windkessel-style) condition; confirm against the model reference.
        u_bndry_outflow = dp_bo_dt - (R1 * dq_bo_dt - (p/(R2*CT)) + q_bo*(R1+R2)/(R2*CT) )
        #print(S1)
        #print(dq_dt)
        #print(dl_dz)
        #print(p2)
        return tf.keras.models.Model(
            inputs = [z,t,z_inflow,t_inflow,z_outflow,t_outflow],
            outputs = [u_eqn,q_bndry_inflow,u_bndry_outflow]
        )
class t2_class(tf.keras.layers.Layer):
    """
    Keras layer computing the t2 term:
    sqrt(pi)*f(r0(z)) + sqrt(A_0)*df/dr0.
    """
    def __init__(self, Ru, Rd, L, **kwargs):
        """
        Args:
            Ru, Rd, L : geometry parameters forwarded to relaxed_radius_func
        """
        # BUG FIX: the original called super().__init__(self, **kwargs),
        # which passes `self` as Layer.__init__'s first positional argument
        # (interpreted as `trainable`). GradientLayer calls it correctly.
        super().__init__(**kwargs)
        self.Ru = Ru
        self.Rd = Rd
        self.L = L

    def call(self, input):
        """
        Args:
            input: tuple (z, A_0, df_dr0).
        Returns:
            sqrt(pi)*elasticity_func(relaxed_radius_func(z)) + sqrt(A_0)*df_dr0
        """
        z = input[0]
        A_0 = input[1]
        df_dr0 = input[2]
        return math.sqrt(math.pi) * elasticity_func(relaxed_radius_func(z, self.Ru, self.Rd, self.L)) + tf.math.sqrt(A_0) * df_dr0
class find_derivatives_l(tf.keras.layers.Layer):
    """
    Keras layers subclass to compute the relaxed-radius area A_0 and the
    derivative of l w.r.t z, where l = q**2/A + f(r0)*sqrt(A_0*A).
    """
    def __init__(self, R_network, q_network, Ru, Rd, L, **kwargs):
        """
        Args:
            R_network : Keras network model simulating R
            q_network : Keras network model simulating q
            Ru, Rd, L : geometry parameters forwarded to relaxed_radius_func
        """
        # BUG FIX: the original called super().__init__(self, **kwargs),
        # which passes `self` as Layer.__init__'s first positional argument
        # (interpreted as `trainable`). GradientLayer calls it correctly.
        super().__init__(**kwargs)
        self.Ru = Ru
        self.Rd = Rd
        self.L = L
        self.grads = GradientLayer(R_network, q_network)

    def call(self, input):
        """
        Computes relaxed radius area and dl_dz.

        Args:
            input: tuple (z, t) of input tensors.
        Returns:
            A_0  : Relaxed radius area computed from pi*square(relaxed_radius)
            dl_dz: The derivative of l w.r.t z
        """
        z = input[0]
        t = input[1]
        with tf.GradientTape() as g3:
            g3.watch(z)
            concat = tf.keras.layers.Concatenate()([z, t])
            A, q, _, _, _, _ = self.grads(concat)
            r0 = relaxed_radius_func(z, self.Ru, self.Rd, self.L)
            A_0 = math.pi * (r0**2)
            l = (q**2)/A + elasticity_func(r0)*tf.sqrt(A_0*A)
        dl_dtx = g3.batch_jacobian(l, z)
        dl_dz = dl_dtx[..., 0]
        return A_0, dl_dz
class p_class(tf.keras.layers.Layer):
    """
    Keras layers to compute the pressure, and related values, at the
    outflow boundary.
    """
    def __init__(self, R_model, q_model, Ru, Rd, L, E, h, **kwargs):
        """
        Args:
            R_model   : Keras network model simulating R
            q_model   : Keras network model simulating q
            Ru, Rd, L : geometry parameters forwarded to relaxed_radius_func
            E         : elastic modulus used in the pressure law
            h         : wall thickness used in the pressure law
        """
        # BUG FIX: the original called super().__init__(self, **kwargs),
        # which passes `self` as Layer.__init__'s first positional argument
        # (interpreted as `trainable`). GradientLayer calls it correctly.
        super().__init__(**kwargs)
        self.R_model = R_model
        self.q_model = q_model
        self.Ru = Ru
        self.Rd = Rd
        self.L = L
        self.E = E
        self.h = h
        self.grad = GradientLayer(self.R_model, self.q_model)

    def call(self, input):
        """
        Calculates the pressure, its derivative and related values.

        Args:
            input: tuple (z_outflow, t_outflow) of boundary input tensors.
        Returns:
            dp_bo_dt : Derivative of p w.r.t t for the outflow boundary condition
            p        : Pressure calculated at the outflow boundary condition
            dq_bo_dt : Derivative of q w.r.t t for the outflow boundary condition
            q_bo     : q calculated at the outflow boundary condition
        """
        z_outflow = input[0]
        t_outflow = input[1]
        concat_layer = tf.keras.layers.Concatenate()([z_outflow, t_outflow])
        with tf.GradientTape() as g:
            g.watch(concat_layer)
            A_bo, q_bo, R_bo, _, dq_bo_dt, _ = self.grad(concat_layer)
            A_bo_0 = math.pi * (relaxed_radius_func(z_outflow, self.Ru, self.Rd, self.L) ** 2)
            # Pressure law: p = (4/3) * (E*h / r0) * (1 - sqrt(A_0/A)).
            p = (4/3)*((self.E*self.h)/relaxed_radius_func(z_outflow, self.Ru, self.Rd, self.L)) * (1 - tf.sqrt(A_bo_0/A_bo))
        dp_dtx_bo = g.batch_jacobian(p, concat_layer)
        dp_bo_dt = dp_dtx_bo[..., 1]
        return dp_bo_dt, p, dq_bo_dt, q_bo
class find_derivatives_r0(tf.keras.layers.Layer):
    """Computes the relaxed radius r0(z) and its spatial derivative dr0/dz."""
    def __init__(self,R_network,Ru,Rd,L,**kwargs):
        # FIX: do not pass self explicitly to super().__init__ -- Keras would
        # interpret it as the positional `trainable` argument of Layer.__init__.
        super().__init__(**kwargs)
        self.R_network = R_network
        self.Ru = Ru
        self.Rd = Rd
        self.L = L
    def call(self,z):
        with tf.GradientTape() as g:
            g.watch(z)  # z is a plain tensor, so it must be watched explicitly
            r0 = relaxed_radius_func(z,self.Ru,self.Rd,self.L)
        dr0_dx = g.batch_jacobian(r0,z)[...,0]
        return dr0_dx,r0
class find_derivatives_f0(tf.keras.layers.Layer):
    """Computes df0/dr0, the derivative of the elasticity w.r.t. relaxed radius."""
    def __init__(self,**kwargs):
        # FIX: do not pass self explicitly to super().__init__ -- Keras would
        # interpret it as the positional `trainable` argument of Layer.__init__.
        super().__init__(**kwargs)
    def call(self,input):
        with tf.GradientTape() as g:
            # FIX: `input` is a plain tensor and must be watched, otherwise
            # batch_jacobian yields no gradient (cf. find_derivatives_r0).
            g.watch(input)
            f0 = elasticity_func(input)
        df0_dr0 = g.batch_jacobian(f0,input)[...,0]
        return df0_dr0
class artery:
    """
    Builds and trains a physics-informed neural network (PINN) model of
    pulsatile blood flow in a single arterial segment.
    """
    def __init__(self, delta_b=2*math.pow(10,-3), Ru=0.37, Rd=0.37, L=20.8, Reynolds_no=4500, E=4.8, h=0.065, q_0=450,
                    length_domain=(0, 20.8), time_domain = (0,0.8), tow=.3, timeperiod=0.8,
                    layers=None, activation='tanh', num_train_samples=100000):
        """
        Args:
            delta_b : boundary-layer thickness parameter
            Ru, Rd : upstream / downstream relaxed radii
            L : vessel length
            Reynolds_no : Reynolds number of the flow
            E, h : Young's modulus and wall thickness
            q_0 : inflow amplitude
            length_domain, time_domain : accepted for backward compatibility;
                the domains actually used are derived from L and timeperiod
            tow : width of the Gaussian inflow pulse
            timeperiod : duration of one cardiac cycle
            layers : hidden-layer sizes for both networks (default [50]*9)
            activation : activation function for both networks
            num_train_samples : number of collocation points
        """
        self.delta_b = delta_b
        self.Ru = Ru
        self.Rd = Rd
        self.L = L
        self.length_domain = (0,L)
        self.time_domain = (0,timeperiod)
        self.tow = tow
        self.q_0 = q_0
        self.timeperiod = timeperiod
        # FIX: avoid the shared mutable default argument `layers=[50]*9`.
        self.layers = [50] * 9 if layers is None else layers
        self.activation = activation
        self.num_train_samples = num_train_samples
        self.R_network = Network.build(num_inputs = 2,layers=self.layers, activation=self.activation)
        self.q_network = Network.build(num_inputs = 2,layers = self.layers, activation = self.activation)
        self.pinn = PINN(self.R_network, self.q_network).build(self.delta_b, E, h, elasticity_func, 253/100, 139/100, 1.3384, Ru, Rd, L, Reynolds_no, q_0)
        self.pinn.summary()
    def create_dataset(self):
        """Draws random interior and boundary training points with their targets."""
        z = np.random.rand(self.num_train_samples, 1)*self.length_domain[1]
        t = np.random.rand(self.num_train_samples,1)*self.time_domain[1]
        z_inflow = np.zeros((self.num_train_samples,1))
        t_inflow = np.random.rand(self.num_train_samples,1)*self.time_domain[1]
        z_outflow = np.ones((self.num_train_samples, 1))*self.length_domain[1]
        t_outflow = np.random.rand(self.num_train_samples,1)*self.time_domain[1]
        x_train = [z,t,z_inflow,t_inflow,z_outflow,t_outflow]
        u_zero = np.zeros((self.num_train_samples, 1))
        # NOTE(review): the inflow target is evaluated at t_outflow here;
        # t_inflow may have been intended -- confirm (same distribution either way).
        q_bndry_inflow = initial_q(t_outflow, self.timeperiod, self.tow,self.q_0)
        u_bndry_outflow = np.zeros((self.num_train_samples,1))
        y_train = [u_zero,q_bndry_inflow,u_bndry_outflow]
        return x_train, y_train
    def sci_train(self, first_order_trainer='rmsprop', batch_size=128, first_order_epochs=10,
               factr=10, m=50, maxls=50, maxiter=15000):
        """Trains the PINN with scipy L-BFGS-B after a first-order warm-up.

        Returns the trained (R_network, q_network) pair.
        """
        x_train, y_train = self.create_dataset()
        trainer = sci_Trainer(self.pinn, x_train, y_train, first_order_trainer=first_order_trainer, batch_size=batch_size,
                        first_order_epochs=first_order_epochs, factr=factr, m=m, maxls=maxls, maxiter=maxiter)
        trainer.train()
        return self.R_network, self.q_network
    def tfp_trainer(self, first_order_trainer='rmsprop', batch_size=128, first_order_epochs=10,
               factr=10, m=50, maxls=50, maxiter=15000):
        """Trains the PINN with the TensorFlow-Probability L-BFGS optimizer.

        Returns the trained (R_network, q_network) pair, like sci_train.
        """
        x_train, y_train = self.create_dataset()
        tfp_trainer = tfp_Trainer(self.pinn, x_train, y_train, first_order_trainer=first_order_trainer, batch_size=batch_size,
                        first_order_epochs=first_order_epochs, maxiter=maxiter)
        result = tfp_trainer.train()
        set_weights(tfp_trainer, self.pinn, result.position)
        # FIX: `self.networking` does not exist (AttributeError); return the
        # trained networks exactly as sci_train does.
        return self.R_network, self.q_network
    def plot_flow(self, num_test_samples=100):
        # NOTE(review): R_network is plotted under the label 'flow' while
        # q_network is plotted as 'radii' -- confirm the pairing is intended.
        plot(self.R_network, (0, self.L), self.time_domain, 'flow', num_test_samples)
    def plot_radius(self, num_test_samples=100):
        plot(self.q_network, (0, self.L), self.time_domain, 'radii', num_test_samples)
def initial_q(t, timeperiod, tow, q_0):
    """Periodic inflow-rate boundary condition q(t), scaled to model units.

    Args:
        t: time (scalar or numpy array); wrapped into one period.
        timeperiod: duration of one cardiac cycle.
        tow: width parameter of the Gaussian pulse.
        q_0: inflow amplitude.
    """
    phase = np.fmod(t, timeperiod)  # wrap time into a single period
    gaussian = np.exp(-np.power(phase, 2) / (2 * (tow ** 2)))
    return ((q_0 * phase) / ((tow ** 2) * gaussian)) / 1000000
def relaxed_radius_func(z, Ru, Rd, L):
    """Exponentially tapered relaxed radius: r0(z) = Ru * (Rd/Ru)**(z/L)."""
    upstream = float(Ru)
    # Taper exponent, cast to float64 for the tensor arithmetic below.
    log_ratio = tf.cast(tf.math.log(Rd / upstream), tf.float64, name=None)
    return upstream * tf.exp(log_ratio * (z / L))
def elasticity_func(r0):
    """Linear elasticity relation f(r0) = (2/3) * r0."""
    two_thirds = 2 / 3
    return two_thirds * r0
| [
"tensorflow.math.log",
"numpy.fmod",
"tensorflow.keras.layers.Concatenate",
"math.pow",
"math.sqrt",
"numpy.power",
"numpy.zeros",
"numpy.ones",
"tensorflow.keras.models.Model",
"tensorflow.math.sqrt",
"tensorflow.exp",
"tensorflow.keras.layers.Input",
"numpy.random.rand",
"tensorflow.kera... | [((12307, 12329), 'numpy.fmod', 'np.fmod', (['t', 'timeperiod'], {}), '(t, timeperiod)\n', (12314, 12329), True, 'import numpy as np\n'), ((2467, 2500), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (2488, 2500), True, 'import tensorflow as tf\n'), ((2508, 2541), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (2529, 2541), True, 'import tensorflow as tf\n'), ((3741, 3774), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (3762, 3774), True, 'import tensorflow as tf\n'), ((3791, 3824), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (3812, 3824), True, 'import tensorflow as tf\n'), ((4008, 4041), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (4029, 4041), True, 'import tensorflow as tf\n'), ((4059, 4092), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (4080, 4092), True, 'import tensorflow as tf\n'), ((4398, 4530), 'tensorflow.keras.models.Model', 'tf.keras.models.Model', ([], {'inputs': '[z, t, z_inflow, t_inflow, z_outflow, t_outflow]', 'outputs': '[u_eqn, q_bndry_inflow, u_bndry_outflow]'}), '(inputs=[z, t, z_inflow, t_inflow, z_outflow,\n t_outflow], outputs=[u_eqn, q_bndry_inflow, u_bndry_outflow])\n', (4419, 4530), True, 'import tensorflow as tf\n'), ((9976, 10013), 'numpy.zeros', 'np.zeros', (['(self.num_train_samples, 1)'], {}), '((self.num_train_samples, 1))\n', (9984, 10013), True, 'import numpy as np\n'), ((10421, 10458), 'numpy.zeros', 'np.zeros', (['(self.num_train_samples, 1)'], {}), '((self.num_train_samples, 1))\n', (10429, 10458), True, 'import numpy as np\n'), ((10709, 10746), 'numpy.zeros', 'np.zeros', (['(self.num_train_samples, 1)'], {}), '((self.num_train_samples, 1))\n', (10717, 10746), True, 'import 
numpy as np\n'), ((12598, 12618), 'tensorflow.math.log', 'tf.math.log', (['(Rd / Ru)'], {}), '(Rd / Ru)\n', (12609, 12618), True, 'import tensorflow as tf\n'), ((12690, 12712), 'tensorflow.exp', 'tf.exp', (['(temp * (z / L))'], {}), '(temp * (z / L))\n', (12696, 12712), True, 'import tensorflow as tf\n'), ((1525, 1542), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (1540, 1542), True, 'import tensorflow as tf\n'), ((1731, 1748), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (1746, 1748), True, 'import tensorflow as tf\n'), ((2614, 2643), 'tensorflow.keras.layers.Concatenate', 'tf.keras.layers.Concatenate', ([], {}), '()\n', (2641, 2643), True, 'import tensorflow as tf\n'), ((3122, 3209), 'tensorflow.keras.layers.Lambda', 'Lambda', (['(lambda ar: -(2 * math.pi * ar[0] * ar[1]) / (delta_b * Reynolds_no * ar[2]))'], {}), '(lambda ar: -(2 * math.pi * ar[0] * ar[1]) / (delta_b * Reynolds_no *\n ar[2]))\n', (3128, 3209), False, 'from tensorflow.keras.layers import Lambda\n'), ((3848, 3877), 'tensorflow.keras.layers.Concatenate', 'tf.keras.layers.Concatenate', ([], {}), '()\n', (3875, 3877), True, 'import tensorflow as tf\n'), ((5624, 5641), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (5639, 5641), True, 'import tensorflow as tf\n'), ((7125, 7154), 'tensorflow.keras.layers.Concatenate', 'tf.keras.layers.Concatenate', ([], {}), '()\n', (7152, 7154), True, 'import tensorflow as tf\n'), ((7188, 7205), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (7203, 7205), True, 'import tensorflow as tf\n'), ((8149, 8166), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (8164, 8166), True, 'import tensorflow as tf\n'), ((8478, 8495), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (8493, 8495), True, 'import tensorflow as tf\n'), ((8685, 8701), 'math.pow', 'math.pow', (['(10)', '(-3)'], {}), '(10, -3)\n', (8693, 8701), False, 'import math\n'), ((9818, 9859), 'numpy.random.rand', 
'np.random.rand', (['self.num_train_samples', '(1)'], {}), '(self.num_train_samples, 1)\n', (9832, 9859), True, 'import numpy as np\n'), ((9895, 9936), 'numpy.random.rand', 'np.random.rand', (['self.num_train_samples', '(1)'], {}), '(self.num_train_samples, 1)\n', (9909, 9936), True, 'import numpy as np\n'), ((10033, 10074), 'numpy.random.rand', 'np.random.rand', (['self.num_train_samples', '(1)'], {}), '(self.num_train_samples, 1)\n', (10047, 10074), True, 'import numpy as np\n'), ((10115, 10151), 'numpy.ones', 'np.ones', (['(self.num_train_samples, 1)'], {}), '((self.num_train_samples, 1))\n', (10122, 10151), True, 'import numpy as np\n'), ((10195, 10236), 'numpy.random.rand', 'np.random.rand', (['self.num_train_samples', '(1)'], {}), '(self.num_train_samples, 1)\n', (10209, 10236), True, 'import numpy as np\n'), ((4829, 4847), 'math.sqrt', 'math.sqrt', (['math.pi'], {}), '(math.pi)\n', (4838, 4847), False, 'import math\n'), ((4915, 4932), 'tensorflow.math.sqrt', 'tf.math.sqrt', (['A_0'], {}), '(A_0)\n', (4927, 4932), True, 'import tensorflow as tf\n'), ((5684, 5713), 'tensorflow.keras.layers.Concatenate', 'tf.keras.layers.Concatenate', ([], {}), '()\n', (5711, 5713), True, 'import tensorflow as tf\n'), ((12358, 12372), 'numpy.power', 'np.power', (['t', '(2)'], {}), '(t, 2)\n', (12366, 12372), True, 'import numpy as np\n'), ((3615, 3652), 'tensorflow.math.pow', 'tf.math.pow', (['(ar[0] + ar[1] - ar[2])', '(2)'], {}), '(ar[0] + ar[1] - ar[2], 2)\n', (3626, 3652), True, 'import tensorflow as tf\n'), ((5911, 5927), 'tensorflow.sqrt', 'tf.sqrt', (['(A_0 * A)'], {}), '(A_0 * A)\n', (5918, 5927), True, 'import tensorflow as tf\n'), ((7732, 7754), 'tensorflow.sqrt', 'tf.sqrt', (['(A_bo_0 / A_bo)'], {}), '(A_bo_0 / A_bo)\n', (7739, 7754), True, 'import tensorflow as tf\n'), ((3514, 3533), 'tensorflow.math.sqrt', 'tf.math.sqrt', (['ar[0]'], {}), '(ar[0])\n', (3526, 3533), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from . import frameworkBase
from . import mcFramework
import os
import random
import sys
import shutil
from . import dynamicFramework
import numpy
from numpy import linalg
import pickle
from .frameworkBase import generateNameT, generateNameS, generateNameST
## \brief Framework for particle filter runs
## \brief Framework for ensemble Kalman filter (EnKF) runs
#
# Wraps a Monte Carlo model and, at user-chosen filter timesteps, updates the
# ensemble of model state vectors with observations using the analysis scheme
# of Evensen's EnKF formulation.
class EnsKalmanFilterFramework(frameworkBase.FrameworkBase):
  ## \brief Constructor
  #
  # \param userModel instance of a MonteCarloFramework wrapping the user model
  def __init__(self, userModel):
    frameworkBase.FrameworkBase.__init__(self)
    self._d_model = userModel
    self._testRequirements()
    self._d_totalTimesteps = self._userModel().nrTimeSteps()
    self._d_trackCloned = {}
    # adding framework specific attributes and methods
    self._addAttributeToClass("_d_filterPeriod", 0)
    self._addAttributeToClass("_d_inFilterPeriod", False)
    self._addAttributeToClass("_d_filterTimesteps", [])
    self._addAttributeToClass("_d_inResume", False)
    self._addAttributeToClass("_d_inUpdateWeight", False)
    self._resetSampleWeights()
    self._addMethodToClass(self.getStateVector)
    self._addMethodToClass(self._runPremcloop)
    self._addMethodToClass(self._runPostmcloop)
    self._addMethodToClass(self.readmap)
    self._addMethodToClass(self.readDeterministic)
    self._addMethodToClass(self.setMeasurementOperator)
    self._addMethodToClass(self.setObservedMatrices)
    # \todo !!!test if filter timesteps are in interval of model timesteps...
    self.sizeStateVector = 0
    self._initialiseObservedDir()
  ## \brief Stores observations and their error covariance for the current
  # filter moment as pickled files in the observedState directory.
  #
  # \param observations m x N matrix of (perturbed) observations
  # \param covariance m x m observation error covariance matrix
  def setObservedMatrices(self, observations, covariance):
    assert type(observations) == numpy.ndarray
    assert type(covariance) == numpy.ndarray
    filtermoment = self._userModel().currentTimeStep()
    fileName = os.path.join("observedState",'obs%s.tmp' % (filtermoment))
    file = open(fileName, 'wb')
    pickle.dump(observations, file)
    file.close()
    fileName = os.path.join("observedState",'cov%s.tmp' % (filtermoment))
    file = open(fileName, 'wb')
    pickle.dump(covariance, file)
    file.close()
  ## \brief Setting the measurement operator for an update moment
  #
  # If this is not used the identity matrix will be used
  def setMeasurementOperator(self, matrix):
    assert type(matrix) == numpy.ndarray
    filtermoment = self._userModel().currentTimeStep()
    fileName = os.path.join("observedState",'h%s.tmp' % (filtermoment))
    file = open(fileName, 'wb')
    pickle.dump(matrix, file)
    file.close()
  ## \brief Verifies the wrapped model implements the required interface.
  def _testRequirements(self):
    #\todo test to dynamic framework model
    if not isinstance(self._d_model, mcFramework.MonteCarloFramework):
      self.showError("Model must be instance of MonteCarloFramework.")
      sys.exit()
    if not hasattr(self._d_model, 'run'):
      self.showError("No 'run' section defined.")
      sys.exit()
    if not hasattr(self._userModel(), 'setState'):
      self.showError("No 'setState' function defined.")
      sys.exit()
    if not hasattr(self._userModel(), 'resume'):
      # FIX: message said "particle filter"; this is the EnKF framework.
      msg = "Cannot run ensemble Kalman filter framework: Implement 'resume' method"
      raise frameworkBase.FrameworkError(msg)
  ## \brief Returns the current list of sample weights.
  def _particleWeights(self):
    return self._userModel()._d_particleWeights
  ## \brief Returns the user model wrapped by the Monte Carlo framework.
  def _userModel(self):
    return self._d_model._userModel()
  ## \brief (Re)creates the directory holding observation files.
  def _initialiseObservedDir(self):
    varName = "observedState"
    if not os.path.isdir(varName):
      # Create sample directory.
      os.mkdir(varName)
    else:
      # Remove existing directory and start with a clean one.
      shutil.rmtree(varName)
      os.mkdir(varName)
  ## \brief (Re)creates the directory holding the ensemble state vectors.
  # \todo test if mc dirs are there...
  def _initialiseStateDir(self):
    varName = "stateVector"
    if not os.path.isdir(varName):
      # Create sample directory.
      os.mkdir(varName)
    else:
      # Remove existing directory and start with a clean one.
      shutil.rmtree(varName)
      os.mkdir(varName)
  ## \brief Creates the stateVar subdirectory inside each sample directory.
  # \todo test if mc dirs are there...
  def _initialiseSampleDirectories(self):
    sample = self._userModel()._firstSampleNumber()
    while sample <= self._userModel()._lastSampleNumber():
      cwd = os.getcwd()
      dirname = "%d" % (sample)
      varName = "stateVar"
      os.chdir(dirname)
      if not os.path.isdir(varName):
        # Create sample directory.
        os.mkdir(varName)
      else:
        # FIX: os.remove cannot delete a directory (raises IsADirectoryError);
        # use shutil.rmtree as the other _initialise* helpers do.
        shutil.rmtree(varName)
        os.mkdir(varName)
      os.chdir(cwd)
      assert os.path.exists(os.path.join(dirname,"stateVar")) and os.path.isdir(os.path.join(dirname,"stateVar"))
      sample += 1
  ## \brief Setting the filter moments
  def setFilterTimesteps(self, filterTimesteps):
    assert type(filterTimesteps) == list or type(filterTimesteps) == numpy.ndarray
    # \todo assert some more
    for filtertimestep in filterTimesteps:
      assert filtertimestep < self._userModel().nrTimeSteps()
    self._userModel()._d_filterTimesteps = filterTimesteps
  ## \brief Returns a list of filter moments
  def filterTimesteps(self):
    return self._userModel()._d_filterTimesteps
  ## \brief Re-implemented from ShellScript.
  #
  # Runs the user model in the filter mode: alternates Monte Carlo periods
  # with EnKF analysis steps at each filter timestep.
  def run(self):
    if hasattr(self._userModel(), 'run'):
      # The user model provides its own run section; delegate to it.
      self._userModel().run()
    else:
      self._atStartOfScript()
      self._initialiseStateDir()
      self._initialiseSampleDirectories()
      lastPeriod = len(self._userModel()._d_filterTimesteps)
      if lastPeriod == 0:
        self.showError("No filter timesteps specified")
        sys.exit()
      # set the proposal/initial weight distribution by user
      if hasattr(self._userModel(), 'setInitialParticleWeights'):
        self._userModel()._d_particleWeights = self._userModel().setInitialParticleWeights()
      # check initial weights
      assert type(self._particleWeights()) == list
      assert len(self._particleWeights()) == self._userModel().nrSamples()
      for i in range(0, len(self._particleWeights())):
        assert type(self._particleWeights()[i]) == float
      # run the premc loop
      self._userModel()._runPremcloop()
      # looping over the filter periods
      for currentPeriod in range(0, len(self._userModel()._d_filterTimesteps) + 1):
        # weights must remain a probability distribution
        # \todo replace with a better solution...
        sumW = sum(self._particleWeights())
        assert abs(sumW - 1.0) < 0.00001
        self._runMonteCarlo(currentPeriod, lastPeriod)
        if not currentPeriod == lastPeriod:
          # retrieve the state vectors for each sample
          for sample in range(1, self._userModel().nrSamples() + 1):
            self._userModel()._setCurrentSample(sample)
            self._userModel()._d_inUpdateWeight = True
            stateVector = self._userModel().setState()
            self._userModel()._d_inUpdateWeight = False
            assert type(stateVector) == numpy.ndarray
            fileName = os.path.join("stateVector",'ensMember%s.tmp' %(sample))
            file = open(fileName,'wb')
            pickle.dump(stateVector, file)
            file.close()
          # analysis step for the current update moment
          self._getObservedValues()
          self._kalmanFilter()
        self._userModel()._d_filterPeriod += 1
        self._userModel()._setFirstTimeStep(1)
      self._userModel()._runPostmcloop()
      return 0
  ## \brief Asks the user model to provide the observations for this moment.
  def _getObservedValues(self):
    self._userModel().setObservations()
  ## \brief Performs the EnKF analysis step for the current filter moment.
  #
  # Follows equations 44-52 from Evensen's paper
  # 'The Ensemble Kalman Filter: theoretical formulation
  # and practical implementation':
  #
  #   n size of state vector (sizeStateVector)
  #   m nr of observations (sizeObservedVector)
  #   N nr of ensemble members
  #
  #   A matrix with model states
  #   H matrix 'measurement operator'
  #   D matrix with observations
  def _kalmanFilter(self):
    # state-vector length is taken from the first ensemble member
    fileName = os.path.join("stateVector",'ensMember%s.tmp' %(str(1)))
    file = open(fileName,'rb')
    vec = pickle.load(file)
    sizeStateVector = len(vec)
    file.close()
    # length of the observed vector \todo do we know that?
    fileName = os.path.join("observedState","obs%s.tmp" %(self._userModel()._d_filterTimesteps[self._userModel()._d_filterPeriod]))
    file = open(fileName,'rb')
    vec = pickle.load(file)
    sizeObservedVector = len(vec)
    file.close()
    nrEnsembleMembers = self._userModel().nrSamples()
    # create A, one column per ensemble member
    A = numpy.zeros((sizeStateVector, nrEnsembleMembers), dtype=float)
    # \todo is there a better way to construct a matrix from vectors?
    for sample in range(1, self._userModel().nrSamples() + 1):
      fileName = os.path.join("stateVector",'ensMember%s.tmp' %(sample))
      file = open(fileName,'rb')
      vec = pickle.load(file)
      file.close()
      for i in range(0, sizeStateVector):
        A[i,sample-1] = vec[i]
    # obtain H specified by user
    fileName = os.path.join("observedState","h%s.tmp" %(self._userModel()._d_filterTimesteps[self._userModel()._d_filterPeriod]))
    if os.path.exists(fileName):
      file = open(fileName,'rb')
      H = pickle.load(file)
      file.close()
    else:
      # or use the identity matrix
      H = numpy.eye(sizeObservedVector, sizeStateVector, dtype=float)
    assert H.shape == (sizeObservedVector, sizeStateVector), "Shape of provided matrix H %s does not match (%s, %s)" %(H.shape, sizeObservedVector, sizeStateVector)
    # obtain D
    fileName = os.path.join("observedState","obs%s.tmp" %(self._userModel()._d_filterTimesteps[self._userModel()._d_filterPeriod]))
    file = open(fileName, 'rb')
    D = pickle.load(file)
    file.close()
    assert D.shape == (sizeObservedVector, nrEnsembleMembers), "Shape of provided matrix D %s does not match (%s, %s)" %(D.shape, sizeObservedVector, nrEnsembleMembers)
    # obtain error covariance matrix
    fileName = os.path.join("observedState","cov%s.tmp" %(self._userModel()._d_filterTimesteps[self._userModel()._d_filterPeriod]))
    file = open(fileName, 'rb')
    Re = pickle.load(file)
    file.close()
    assert Re.shape == (sizeObservedVector, sizeObservedVector), "Shape of provided matrix Re %s does not match (%s, %s)" %(Re.shape, sizeObservedVector, sizeObservedVector)
    # calculate Pe, the ensemble covariance, from the ensemble perturbations
    Abar = numpy.dot(A,numpy.array( [[1.0/nrEnsembleMembers] * nrEnsembleMembers ] * nrEnsembleMembers, dtype=float))
    Ad = A - Abar
    Pe = 1.0/(nrEnsembleMembers - 1) * numpy.dot(Ad,numpy.transpose(Ad))
    # calculate the new A matrix (analysis update)
    DmAH = D - numpy.dot(H,A)
    PeHt = numpy.dot(Pe,numpy.transpose(H))
    HPeHt = numpy.dot(H, PeHt)
    HPeHtpRe = HPeHt + Re
    # pseudo-inverse copes with a (near-)singular innovation covariance
    INV = linalg.pinv(HPeHtpRe)
    INVDmAH = numpy.dot(INV, DmAH)
    A = A + numpy.dot(PeHt, INVDmAH)
    # write the analysed columns back to disk, one file per ensemble member
    for sample in range(1, self._userModel().nrSamples() + 1):
      fileName = os.path.join("stateVector",'a%s.tmp' %(sample))
      file = open(fileName,'wb')
      index = sample - 1
      vec = A[:,index]
      pickle.dump(vec, file)
      file.close()
  ## \brief Returns the updated variables
  def getStateVector(self, sampleNumber):
    fileName = os.path.join("stateVector",'a%s.tmp' %(sampleNumber))
    file = open(fileName,'rb')
    vec = pickle.load(file)
    file.close()
    return vec
  ## \brief Normalises the given weights so that they sum to one.
  def _normaliseWeights(self, weights):
    assert weights
    sumWeights = sum(weights)
    norm = [0.0] * len(weights)
    for i in range(0, len(weights)):
      norm[i] = weights[i] / sumWeights
    return norm
  ## \brief Resets all sample weights to the uniform distribution.
  def _resetSampleWeights(self):
    assert self._userModel().nrSamples() > 0
    self._userModel()._d_particleWeights = [1.0 / self._userModel().nrSamples()] * self._userModel().nrSamples()
  ## \brief Returns the cumulative sums of the given weights.
  def _cumulativeWeights(self, weights):
    cumulative = [0.0] * self._userModel().nrSamples()
    value = 0.0
    for i in range(len(weights)):
      value += weights[i]
      cumulative[i] = value
    return cumulative
  ## \brief Determines the first and last timestep of a filter period.
  def _startEndOfPeriod(self, currentPeriod, lastPeriod):
    # determine start and end timestep of current period
    if currentPeriod == 0:
      startTimestep = 1
      endTimestep = self._userModel()._d_filterTimesteps[currentPeriod]
    elif currentPeriod == lastPeriod:
      startTimestep = self._userModel()._d_filterTimesteps[currentPeriod -1] + 1
      endTimestep = self._d_totalTimesteps
    else:
      startTimestep = self._userModel()._d_filterTimesteps[currentPeriod - 1] + 1
      endTimestep = self._userModel()._d_filterTimesteps[currentPeriod]
    assert startTimestep <= endTimestep
    return startTimestep, endTimestep
  ## \brief Determines whether pre/post Monte Carlo sections run this period.
  def _executePrePostMc(self, currentPeriod, lastPeriod):
    if currentPeriod == 0:
      # execute premc
      premc = True
      postmc = False
    elif currentPeriod == lastPeriod:
      # execute postmc
      premc = False
      postmc = True
    else:
      # without pre/postmc
      premc = False
      postmc = False
    # \todo assert something
    return premc, postmc
  ## \brief Runs the wrapped Monte Carlo model for one filter period.
  def _runMonteCarlo(self, currentPeriod, lastPeriod):
    # get user model and (re)set start and end time
    startTimestep, endTimestep = self._startEndOfPeriod(currentPeriod, lastPeriod)
    self._userModel()._setNrTimeSteps(endTimestep)
    self._userModel()._setFirstTimeStep(startTimestep)
    self._userModel()._setCurrentTimeStep(endTimestep)
    # run the model in mc mode for current filter period
    self._incrementIndentLevel()
    self._atStartOfFilterPeriod(currentPeriod)
    self._d_model.run(False, False)
    self._atEndOfFilterPeriod()
    self._decrementIndentLevel()
  ## \brief reading sample data from disk
  # returns the map of the current time step from the current sample directory
  def readmap(self, name):
    return self._readmapNew(name)
  ## \brief reading deterministic data from disk
  # returns the map of the current time step from the current working directory
  def readDeterministic(self, name):
    if self._userModel()._inPremc() or self._userModel()._inPostmc() or self._userModel()._inInitial():
      newName = name + ".map"
    else:
      newName = generateNameT(name, self._userModel().currentTimeStep())
    import pcraster
    return pcraster.readmap(newName)
| [
"pcraster.readmap",
"os.mkdir",
"pickle.dump",
"os.remove",
"numpy.eye",
"shutil.rmtree",
"os.path.isdir",
"os.getcwd",
"numpy.zeros",
"os.path.exists",
"numpy.transpose",
"numpy.linalg.pinv",
"pickle.load",
"numpy.array",
"numpy.dot",
"os.path.join",
"os.chdir",
"sys.exit"
] | [((1739, 1796), 'os.path.join', 'os.path.join', (['"""observedState"""', "('obs%s.tmp' % filtermoment)"], {}), "('observedState', 'obs%s.tmp' % filtermoment)\n", (1751, 1796), False, 'import os\n'), ((1834, 1865), 'pickle.dump', 'pickle.dump', (['observations', 'file'], {}), '(observations, file)\n', (1845, 1865), False, 'import pickle\n'), ((1899, 1956), 'os.path.join', 'os.path.join', (['"""observedState"""', "('cov%s.tmp' % filtermoment)"], {}), "('observedState', 'cov%s.tmp' % filtermoment)\n", (1911, 1956), False, 'import os\n'), ((1994, 2023), 'pickle.dump', 'pickle.dump', (['covariance', 'file'], {}), '(covariance, file)\n', (2005, 2023), False, 'import pickle\n'), ((2326, 2381), 'os.path.join', 'os.path.join', (['"""observedState"""', "('h%s.tmp' % filtermoment)"], {}), "('observedState', 'h%s.tmp' % filtermoment)\n", (2338, 2381), False, 'import os\n'), ((2419, 2444), 'pickle.dump', 'pickle.dump', (['matrix', 'file'], {}), '(matrix, file)\n', (2430, 2444), False, 'import pickle\n'), ((8182, 8199), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (8193, 8199), False, 'import pickle\n'), ((8480, 8497), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (8491, 8497), False, 'import pickle\n'), ((8630, 8692), 'numpy.zeros', 'numpy.zeros', (['(sizeStateVector, nrEnsembleMembers)'], {'dtype': 'float'}), '((sizeStateVector, nrEnsembleMembers), dtype=float)\n', (8641, 8692), False, 'import numpy\n'), ((9226, 9250), 'os.path.exists', 'os.path.exists', (['fileName'], {}), '(fileName)\n', (9240, 9250), False, 'import os\n'), ((9801, 9818), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (9812, 9818), False, 'import pickle\n'), ((10217, 10234), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (10228, 10234), False, 'import pickle\n'), ((10779, 10797), 'numpy.dot', 'numpy.dot', (['H', 'PeHt'], {}), '(H, PeHt)\n', (10788, 10797), False, 'import numpy\n'), ((10836, 10857), 'numpy.linalg.pinv', 'linalg.pinv', (['HPeHtpRe'], {}), 
'(HPeHtpRe)\n', (10847, 10857), False, 'from numpy import linalg\n'), ((10874, 10894), 'numpy.dot', 'numpy.dot', (['INV', 'DmAH'], {}), '(INV, DmAH)\n', (10883, 10894), False, 'import numpy\n'), ((11294, 11347), 'os.path.join', 'os.path.join', (['"""stateVector"""', "('a%s.tmp' % sampleNumber)"], {}), "('stateVector', 'a%s.tmp' % sampleNumber)\n", (11306, 11347), False, 'import os\n'), ((11389, 11406), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (11400, 11406), False, 'import pickle\n'), ((14301, 14326), 'pcraster.readmap', 'pcraster.readmap', (['newName'], {}), '(newName)\n', (14317, 14326), False, 'import pcraster\n'), ((2687, 2697), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2695, 2697), False, 'import sys\n'), ((2797, 2807), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2805, 2807), False, 'import sys\n'), ((2922, 2932), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2930, 2932), False, 'import sys\n'), ((3330, 3352), 'os.path.isdir', 'os.path.isdir', (['varName'], {}), '(varName)\n', (3343, 3352), False, 'import os\n'), ((3393, 3410), 'os.mkdir', 'os.mkdir', (['varName'], {}), '(varName)\n', (3401, 3410), False, 'import os\n'), ((3529, 3551), 'shutil.rmtree', 'shutil.rmtree', (['varName'], {}), '(varName)\n', (3542, 3551), False, 'import shutil\n'), ((3558, 3575), 'os.mkdir', 'os.mkdir', (['varName'], {}), '(varName)\n', (3566, 3575), False, 'import os\n'), ((3752, 3774), 'os.path.isdir', 'os.path.isdir', (['varName'], {}), '(varName)\n', (3765, 3774), False, 'import os\n'), ((3815, 3832), 'os.mkdir', 'os.mkdir', (['varName'], {}), '(varName)\n', (3823, 3832), False, 'import os\n'), ((3951, 3973), 'shutil.rmtree', 'shutil.rmtree', (['varName'], {}), '(varName)\n', (3964, 3973), False, 'import shutil\n'), ((3980, 3997), 'os.mkdir', 'os.mkdir', (['varName'], {}), '(varName)\n', (3988, 3997), False, 'import os\n'), ((4263, 4274), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4272, 4274), False, 'import os\n'), ((4341, 4358), 'os.chdir', 'os.chdir', 
(['dirname'], {}), '(dirname)\n', (4349, 4358), False, 'import os\n'), ((4636, 4649), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (4644, 4649), False, 'import os\n'), ((8843, 8898), 'os.path.join', 'os.path.join', (['"""stateVector"""', "('ensMember%s.tmp' % sample)"], {}), "('stateVector', 'ensMember%s.tmp' % sample)\n", (8855, 8898), False, 'import os\n'), ((8944, 8961), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (8955, 8961), False, 'import pickle\n'), ((9295, 9312), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (9306, 9312), False, 'import pickle\n'), ((9386, 9445), 'numpy.eye', 'numpy.eye', (['sizeObservedVector', 'sizeStateVector'], {'dtype': 'float'}), '(sizeObservedVector, sizeStateVector, dtype=float)\n', (9395, 9445), False, 'import numpy\n'), ((10470, 10567), 'numpy.array', 'numpy.array', (['([[1.0 / nrEnsembleMembers] * nrEnsembleMembers] * nrEnsembleMembers)'], {'dtype': 'float'}), '([[1.0 / nrEnsembleMembers] * nrEnsembleMembers] *\n nrEnsembleMembers, dtype=float)\n', (10481, 10567), False, 'import numpy\n'), ((10706, 10721), 'numpy.dot', 'numpy.dot', (['H', 'A'], {}), '(H, A)\n', (10715, 10721), False, 'import numpy\n'), ((10746, 10764), 'numpy.transpose', 'numpy.transpose', (['H'], {}), '(H)\n', (10761, 10764), False, 'import numpy\n'), ((10909, 10933), 'numpy.dot', 'numpy.dot', (['PeHt', 'INVDmAH'], {}), '(PeHt, INVDmAH)\n', (10918, 10933), False, 'import numpy\n'), ((11016, 11063), 'os.path.join', 'os.path.join', (['"""stateVector"""', "('a%s.tmp' % sample)"], {}), "('stateVector', 'a%s.tmp' % sample)\n", (11028, 11063), False, 'import os\n'), ((11152, 11174), 'pickle.dump', 'pickle.dump', (['vec', 'file'], {}), '(vec, file)\n', (11163, 11174), False, 'import pickle\n'), ((4373, 4395), 'os.path.isdir', 'os.path.isdir', (['varName'], {}), '(varName)\n', (4386, 4395), False, 'import os\n'), ((4440, 4457), 'os.mkdir', 'os.mkdir', (['varName'], {}), '(varName)\n', (4448, 4457), False, 'import os\n'), ((4584, 4602), 
'os.remove', 'os.remove', (['varName'], {}), '(varName)\n', (4593, 4602), False, 'import os\n'), ((4611, 4628), 'os.mkdir', 'os.mkdir', (['varName'], {}), '(varName)\n', (4619, 4628), False, 'import os\n'), ((5764, 5774), 'sys.exit', 'sys.exit', ([], {}), '()\n', (5772, 5774), False, 'import sys\n'), ((10636, 10655), 'numpy.transpose', 'numpy.transpose', (['Ad'], {}), '(Ad)\n', (10651, 10655), False, 'import numpy\n'), ((4678, 4711), 'os.path.join', 'os.path.join', (['dirname', '"""stateVar"""'], {}), "(dirname, 'stateVar')\n", (4690, 4711), False, 'import os\n'), ((4730, 4763), 'os.path.join', 'os.path.join', (['dirname', '"""stateVar"""'], {}), "(dirname, 'stateVar')\n", (4742, 4763), False, 'import os\n'), ((7133, 7188), 'os.path.join', 'os.path.join', (['"""stateVector"""', "('ensMember%s.tmp' % sample)"], {}), "('stateVector', 'ensMember%s.tmp' % sample)\n", (7145, 7188), False, 'import os\n'), ((7240, 7270), 'pickle.dump', 'pickle.dump', (['stateVector', 'file'], {}), '(stateVector, file)\n', (7251, 7270), False, 'import pickle\n')] |
import operator as op
import unittest
import typing
from unittest.mock import call, patch, ANY
import numpy as np
import pandas as pd
from . import Tracer
class BaseTest(unittest.TestCase):
    """Shared scaffolding: a mocked ``log_call`` plus tracing helpers."""

    def setUp(self):
        log_call_patcher = patch("record_api.core.log_call")
        self.mock = log_call_patcher.start()
        self.addCleanup(log_call_patcher.stop)
        self.maxDiff = None

    def trace(self, source: str):
        """Execute ``source`` while the tracer is active.

        ``exec`` is used so the snippet runs in a child scope; an IIFE
        would also work but is more verbose in the tests themselves.
        """
        with self.tracer:  # type: ignore
            exec(source)

    def assertCalls(self, *calls):
        """Assert the mock recorded exactly ``calls``, in order."""
        self.assertListEqual(self.mock.mock_calls, list(calls))
class TestMockNumPyMethod(BaseTest):
    """Trace numpy usage: each test runs a snippet and asserts the exact
    calls the tracer logged (operator dispatch, getattr, ufuncs, methods).

    ``self.a`` is the traced array; snippets that touch only builtins or
    plain objects must not produce any logged call.
    """
    def setUp(self):
        super().setUp()
        self.a = np.arange(10)
        # NOTE(review): Tracer args look like (modules to trace, calling
        # modules) — confirm against the Tracer implementation.
        self.tracer = Tracer(["numpy"], ["record_api.test"])
    def test_pos(self):
        self.trace("+self.a")
        self.mock.assert_called_once_with(
            ANY, op.pos, (self.a,),
        )
    def test_neg(self):
        self.trace("-self.a")
        self.mock.assert_called_once_with(
            ANY, op.neg, (self.a,),
        )
    def test_invert(self):
        self.trace("~self.a")
        self.mock.assert_called_once_with(
            ANY, op.invert, (self.a,),
        )
    def test_add(self):
        self.trace("self.a + 10")
        self.mock.assert_called_once_with(
            ANY, op.add, (self.a, 10),
        )
    def test_radd(self):
        # verify regular add doesn't add
        self.trace("10 + 10")
        self.trace("10 + self.a")
        # Reflected add is logged as op.add with the operands in call order.
        self.mock.assert_called_once_with(
            ANY, op.add, (10, self.a),
        )
    def test_iadd(self):
        self.trace("self.a += 10")
        self.mock.assert_called_once_with(
            ANY, op.iadd, (self.a, 10),
        )
    def test_getitem(self):
        # verify regular getitem doesnt trigger
        self.trace("[self.a][0]")
        self.trace("self.a[0]")
        self.mock.assert_called_once_with(
            ANY, op.getitem, (self.a, 0),
        )
    def test_setitem(self):
        # verify regular setitem doesnt trigger
        self.trace("l = [0]\nl[0] = self.a")
        self.trace("self.a[0] = 1")
        self.mock.assert_called_once_with(
            ANY, op.setitem, (self.a, 0, 1),
        )
    def test_setattr(self):
        self.trace("self.a.shape = (10, 1)")
        # Verify normal setattr doesn't trigger
        self.trace("o = lambda: None\no.something = self.a")
        self.mock.assert_called_once_with(
            ANY, setattr, (self.a, "shape", (10, 1)),
        )
    def test_tuple_unpack(self):
        # Each starred unpack of the array is logged as one iter() call.
        self.trace("(*self.a, 10, *self.a)")
        iter_ = call(ANY, iter, (self.a,))
        self.assertCalls(iter_, iter_)
    def test_tuple_unpack_with_call(self):
        self.trace("def f(*args): pass\nf(*self.a, 10, *self.a)")
        iter_ = call(ANY, iter, (self.a,))
        self.assertCalls(iter_, iter_)
    def test_load_attr(self):
        # verify normal object doesn't trigger
        self.trace("o = lambda: None\no.shape = self.a\no.shape")
        self.trace("self.a.shape")
        self.mock.assert_called_once_with(
            ANY, getattr, (self.a, "shape"),
        )
    def test_arange(self):
        self.trace("np.arange(10)")
        self.mock.assert_called_once_with(
            ANY, np.arange, (10,),
        )
    def test_arange_in_fn(self):
        self.trace("(lambda: np.arange(10))()")
        self.mock.assert_called_once_with(
            ANY, np.arange, (10,),
        )
    def test_power(self):
        self.trace("np.power(100, 10)")
        self.mock.assert_called_once_with(
            ANY, np.power, (100, 10),
        )
    def test_sort(self):
        # Bound-method call: attribute access is logged, then the call itself.
        self.trace("self.a.sort(axis=0)")
        self.assertCalls(
            call(ANY, getattr, (self.a, "sort")),
            call(ANY, self.a.sort, (), {"axis": 0}),
        )
    def test_eye(self):
        self.trace("np.eye(10, order='F')")
        self.assertCalls(
            call(ANY, getattr, (np, "eye")), call(ANY, np.eye, (10,), {"order": "F"}),
        )
    def test_linspace(self):
        self.trace("np.linspace(3, 4, endpoint=False)")
        self.assertCalls(
            call(ANY, getattr, (np, "linspace",)),
            call(ANY, np.linspace, (3, 4,), {"endpoint": False}),
        )
    def test_reshape(self):
        self.trace("self.a.reshape((5, 2))")
        self.assertCalls(call(ANY, np.ndarray.reshape, (self.a, (5, 2),),))
    def test_transpose(self):
        self.trace("self.a.T")
        self.assertCalls(call(ANY, getattr, (self.a, "T")))
    def test_concatenate(self):
        self.trace("np.concatenate((self.a, self.a), axis=0)")
        self.assertCalls(
            call(ANY, getattr, (np, "concatenate",)),
            call(ANY, np.concatenate, ((self.a, self.a),), {"axis": 0}),
        )
    def test_ravel_list(self):
        """
        from numeric function to test array dispatch
        """
        self.trace("np.ravel([1, 2, 3])")
        self.assertCalls(call(ANY, np.ravel, ([1, 2, 3],)))
    def test_ravel_array(self):
        """
        from numeric function to test array dispatch
        """
        self.trace("np.ravel(self.a,)")
        self.assertCalls(call(ANY, np.ravel, (self.a,)))
    def test_std(self):
        self.trace("np.std(self.a,)")
        self.assertCalls(call(ANY, np.std, (self.a,)))
    def test_builtin_types_no_call(self):
        # Pure builtin arithmetic must not be logged at all.
        self.trace("10 + 10\n12323.234 - 2342.40")
        self.mock.assert_not_called()
    def test_numpy_array_constructor(self):
        self.trace("np.ndarray(dtype='int64', shape=tuple())")
        self.assertCalls(
            call(ANY, getattr, (np, "ndarray")),
            call(ANY, np.ndarray, (), {"dtype": "int64", "shape": tuple()}),
        )
    def test_not_contains(self):
        # `not in` is still logged as a single op.contains call.
        self.trace("1 not in self.a")
        self.assertCalls(call(ANY, op.contains, (self.a, 1)))
    def test_reduction(self):
        self.trace("np.add.reduce(self.a,)")
        self.assertCalls(
            call(ANY, getattr, (np, "add")),
            call(ANY, np.ufunc.reduce, (np.add, self.a)),
        )
    def test_method(self):
        self.trace("self.a.sum()")
        self.assertCalls(call(ANY, np.ndarray.sum, (self.a,)))
    def test_method_unbound(self):
        self.trace("np.ndarray.sum(self.a,)")
        self.assertCalls(
            call(ANY, getattr, (np, "ndarray")), call(ANY, np.ndarray.sum, (self.a,))
        )
    def test_contains(self):
        self.trace("1 in self.a")
        # Containment in a plain list (array as member) is not logged.
        self.trace("self.a in []")
        self.mock.assert_called_once_with(
            ANY, op.contains, (self.a, 1),
        )
class TestMockPandasMethod(BaseTest):
    """Trace pandas usage; only calls into `pandas` should be logged."""
    def setUp(self):
        super().setUp()
        self.tracer = Tracer(["pandas"], ["record_api.test"])
        self.df = pd.DataFrame.from_records([{"hi": 1}])
    def test_from_records(self):
        # Classmethod access logs a getattr on the module, then the call itself.
        self.trace("pd.DataFrame.from_records([{'hi': 1}])")
        self.assertCalls(
            call(ANY, getattr, (pd, "DataFrame")),
            call(ANY, pd.DataFrame.from_records, ([{"hi": 1}],)),
        )
if __name__ == "__main__":
    # Allow running this test module directly (outside a test runner).
    unittest.main()
| [
"unittest.main",
"unittest.mock.patch",
"numpy.arange",
"pandas.DataFrame.from_records",
"unittest.mock.call"
] | [((7255, 7270), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7268, 7270), False, 'import unittest\n'), ((233, 266), 'unittest.mock.patch', 'patch', (['"""record_api.core.log_call"""'], {}), "('record_api.core.log_call')\n", (238, 266), False, 'from unittest.mock import call, patch, ANY\n'), ((854, 867), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (863, 867), True, 'import numpy as np\n'), ((2774, 2800), 'unittest.mock.call', 'call', (['ANY', 'iter', '(self.a,)'], {}), '(ANY, iter, (self.a,))\n', (2778, 2800), False, 'from unittest.mock import call, patch, ANY\n'), ((2966, 2992), 'unittest.mock.call', 'call', (['ANY', 'iter', '(self.a,)'], {}), '(ANY, iter, (self.a,))\n', (2970, 2992), False, 'from unittest.mock import call, patch, ANY\n'), ((6935, 6973), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (["[{'hi': 1}]"], {}), "([{'hi': 1}])\n", (6960, 6973), True, 'import pandas as pd\n'), ((3895, 3931), 'unittest.mock.call', 'call', (['ANY', 'getattr', "(self.a, 'sort')"], {}), "(ANY, getattr, (self.a, 'sort'))\n", (3899, 3931), False, 'from unittest.mock import call, patch, ANY\n'), ((3945, 3984), 'unittest.mock.call', 'call', (['ANY', 'self.a.sort', '()', "{'axis': 0}"], {}), "(ANY, self.a.sort, (), {'axis': 0})\n", (3949, 3984), False, 'from unittest.mock import call, patch, ANY\n'), ((4103, 4134), 'unittest.mock.call', 'call', (['ANY', 'getattr', "(np, 'eye')"], {}), "(ANY, getattr, (np, 'eye'))\n", (4107, 4134), False, 'from unittest.mock import call, patch, ANY\n'), ((4136, 4176), 'unittest.mock.call', 'call', (['ANY', 'np.eye', '(10,)', "{'order': 'F'}"], {}), "(ANY, np.eye, (10,), {'order': 'F'})\n", (4140, 4176), False, 'from unittest.mock import call, patch, ANY\n'), ((4312, 4348), 'unittest.mock.call', 'call', (['ANY', 'getattr', "(np, 'linspace')"], {}), "(ANY, getattr, (np, 'linspace'))\n", (4316, 4348), False, 'from unittest.mock import call, patch, ANY\n'), ((4363, 4414), 'unittest.mock.call', 'call', (['ANY', 
'np.linspace', '(3, 4)', "{'endpoint': False}"], {}), "(ANY, np.linspace, (3, 4), {'endpoint': False})\n", (4367, 4414), False, 'from unittest.mock import call, patch, ANY\n'), ((4526, 4573), 'unittest.mock.call', 'call', (['ANY', 'np.ndarray.reshape', '(self.a, (5, 2))'], {}), '(ANY, np.ndarray.reshape, (self.a, (5, 2)))\n', (4530, 4573), False, 'from unittest.mock import call, patch, ANY\n'), ((4664, 4697), 'unittest.mock.call', 'call', (['ANY', 'getattr', "(self.a, 'T')"], {}), "(ANY, getattr, (self.a, 'T'))\n", (4668, 4697), False, 'from unittest.mock import call, patch, ANY\n'), ((4833, 4872), 'unittest.mock.call', 'call', (['ANY', 'getattr', "(np, 'concatenate')"], {}), "(ANY, getattr, (np, 'concatenate'))\n", (4837, 4872), False, 'from unittest.mock import call, patch, ANY\n'), ((4887, 4946), 'unittest.mock.call', 'call', (['ANY', 'np.concatenate', '((self.a, self.a),)', "{'axis': 0}"], {}), "(ANY, np.concatenate, ((self.a, self.a),), {'axis': 0})\n", (4891, 4946), False, 'from unittest.mock import call, patch, ANY\n'), ((5134, 5167), 'unittest.mock.call', 'call', (['ANY', 'np.ravel', '([1, 2, 3],)'], {}), '(ANY, np.ravel, ([1, 2, 3],))\n', (5138, 5167), False, 'from unittest.mock import call, patch, ANY\n'), ((5344, 5374), 'unittest.mock.call', 'call', (['ANY', 'np.ravel', '(self.a,)'], {}), '(ANY, np.ravel, (self.a,))\n', (5348, 5374), False, 'from unittest.mock import call, patch, ANY\n'), ((5464, 5492), 'unittest.mock.call', 'call', (['ANY', 'np.std', '(self.a,)'], {}), '(ANY, np.std, (self.a,))\n', (5468, 5492), False, 'from unittest.mock import call, patch, ANY\n'), ((5772, 5807), 'unittest.mock.call', 'call', (['ANY', 'getattr', "(np, 'ndarray')"], {}), "(ANY, getattr, (np, 'ndarray'))\n", (5776, 5807), False, 'from unittest.mock import call, patch, ANY\n'), ((5993, 6028), 'unittest.mock.call', 'call', (['ANY', 'op.contains', '(self.a, 1)'], {}), '(ANY, op.contains, (self.a, 1))\n', (5997, 6028), False, 'from unittest.mock import call, patch, ANY\n'), 
((6144, 6175), 'unittest.mock.call', 'call', (['ANY', 'getattr', "(np, 'add')"], {}), "(ANY, getattr, (np, 'add'))\n", (6148, 6175), False, 'from unittest.mock import call, patch, ANY\n'), ((6189, 6233), 'unittest.mock.call', 'call', (['ANY', 'np.ufunc.reduce', '(np.add, self.a)'], {}), '(ANY, np.ufunc.reduce, (np.add, self.a))\n', (6193, 6233), False, 'from unittest.mock import call, patch, ANY\n'), ((6333, 6369), 'unittest.mock.call', 'call', (['ANY', 'np.ndarray.sum', '(self.a,)'], {}), '(ANY, np.ndarray.sum, (self.a,))\n', (6337, 6369), False, 'from unittest.mock import call, patch, ANY\n'), ((6491, 6526), 'unittest.mock.call', 'call', (['ANY', 'getattr', "(np, 'ndarray')"], {}), "(ANY, getattr, (np, 'ndarray'))\n", (6495, 6526), False, 'from unittest.mock import call, patch, ANY\n'), ((6528, 6564), 'unittest.mock.call', 'call', (['ANY', 'np.ndarray.sum', '(self.a,)'], {}), '(ANY, np.ndarray.sum, (self.a,))\n', (6532, 6564), False, 'from unittest.mock import call, patch, ANY\n'), ((7107, 7144), 'unittest.mock.call', 'call', (['ANY', 'getattr', "(pd, 'DataFrame')"], {}), "(ANY, getattr, (pd, 'DataFrame'))\n", (7111, 7144), False, 'from unittest.mock import call, patch, ANY\n'), ((7158, 7210), 'unittest.mock.call', 'call', (['ANY', 'pd.DataFrame.from_records', "([{'hi': 1}],)"], {}), "(ANY, pd.DataFrame.from_records, ([{'hi': 1}],))\n", (7162, 7210), False, 'from unittest.mock import call, patch, ANY\n')] |
"""Test discern.estimators.batch_integration."""
import json
import pathlib
from contextlib import ExitStack as no_raise
import numpy as np
import pandas as pd
import pytest
import tensorflow as tf
import tensorflow_addons
from discern import io
from discern.estimators import batch_integration
from discern.estimators import losses, utilities_wae
class TestDISCERN:
    """Testclass for DISCERN.

    Every test isolates the method under test by monkeypatching its
    collaborators (utilities_wae builders, losses, Keras fit/save, the
    generate_* pipeline), so no real training or file I/O happens.
    """
    # pylint: disable=no-self-use
    def test_from_json(self, parameters):
        """Test model creation from json."""
        # pylint: disable=too-many-locals
        parameters_path = pathlib.Path(parameters)
        with parameters_path.open('r') as file:
            parameters = json.load(file)
        got = batch_integration.DISCERN.from_json(parameters)
        # A freshly constructed model has no trained steps and no Keras model yet.
        assert got.start_step == 0
        assert got.wae_model is None
    @pytest.mark.parametrize("with_build, with_model, exception",
                             [(True, False, pytest.raises(AttributeError)),
                              (False, True, pytest.raises(AttributeError)),
                              (True, True, no_raise())])
    def test_encoder(self, default_model, with_build, with_model, exception):
        """Test encoder property."""
        tf.keras.backend.clear_session()
        if with_build:
            default_model.build_model(n_genes=100, n_labels=2, scale=0)
        if not with_model:
            default_model.wae_model = None
        with exception:
            got = default_model.encoder
            assert isinstance(got, tf.keras.Model)
            assert got.name == 'encoder'
    @pytest.mark.parametrize("with_build, with_model, exception",
                             [(True, False, pytest.raises(AttributeError)),
                              (False, True, pytest.raises(AttributeError)),
                              (True, True, no_raise())])
    def test_decoder(self, default_model, with_build, with_model, exception):
        """Test decoder property."""
        tf.keras.backend.clear_session()
        if with_build:
            default_model.build_model(n_genes=100, n_labels=2, scale=0)
        if not with_model:
            default_model.wae_model = None
        with exception:
            got = default_model.decoder
            assert isinstance(got, tf.keras.Model)
            assert got.name == 'decoder'
    @pytest.mark.parametrize("is_compiled", [True, False])
    def test_restore_model(self, default_model, monkeypatch, is_compiled):
        """Test restoring of a model."""
        tf.keras.backend.clear_session()
        def patch_load_model_from_directory(directory):
            assert directory == "somedir"
            model = tf.keras.Model()
            if is_compiled:
                model.compile(optimizer='adam', loss='mse')
            return model, 0
        def patch_compile(self, optimizer):
            self.wae_model.compile(optimizer=optimizer, loss='mae')
        monkeypatch.setattr(utilities_wae, "load_model_from_directory",
                            patch_load_model_from_directory)
        monkeypatch.setattr(batch_integration.DISCERN, "get_optimizer",
                            lambda self: tf.keras.optimizers.Adagrad())
        monkeypatch.setattr(batch_integration.DISCERN, "compile", patch_compile)
        default_model.restore_model("somedir")
        model = default_model.wae_model
        # An already-compiled restored model keeps its own optimizer/loss
        # (adam/mse); otherwise restore_model recompiles via the patched
        # get_optimizer/compile pair (adagrad/mae).
        if is_compiled:
            assert isinstance(model.optimizer, tf.keras.optimizers.Adam)
            assert model.loss == "mse"
        else:
            assert isinstance(model.optimizer, tf.keras.optimizers.Adagrad)
            assert model.loss == "mae"
    def test_build_model(self, default_model, monkeypatch):
        """Test model building."""
        # Each patched builder checks the arguments build_model forwards
        # and returns a sentinel consumed by the next stage.
        def patch_create_encoder(latent_dim, enc_layers, enc_norm_type,
                                 activation_fn, input_dim, n_labels,
                                 regularization, conditional_regularization):
            # pylint: disable=too-many-arguments
            assert latent_dim == default_model.latent_dim
            assert enc_layers == default_model.encoder_config.layers
            assert enc_norm_type == default_model.encoder_config.norm_type
            assert activation_fn == default_model.activation_fn
            assert input_dim == 100
            assert n_labels == 2
            assert regularization == default_model.encoder_config.regularization
            assert (conditional_regularization ==
                    default_model.decoder_config.conditional_regularization)
            return "Encoder"
        monkeypatch.setattr(utilities_wae, "create_encoder",
                            patch_create_encoder)
        def patch_create_decoder(latent_dim, output_cells_dim, dec_layers,
                                 dec_norm_type, output_lsn, activation_fn,
                                 output_fn, n_labels, regularization,
                                 conditional_regularization):
            # pylint: disable=too-many-arguments
            assert latent_dim == default_model.latent_dim
            assert output_cells_dim == 100
            assert n_labels == 2
            assert dec_layers == default_model.decoder_config.layers
            assert dec_norm_type == default_model.decoder_config.norm_type
            assert output_lsn == default_model.output_lsn
            assert activation_fn == default_model.activation_fn
            assert output_fn == default_model.output_fn
            assert regularization == default_model.decoder_config.regularization
            assert (conditional_regularization ==
                    default_model.decoder_config.conditional_regularization)
            return "Decoder"
        monkeypatch.setattr(utilities_wae, "create_decoder",
                            patch_create_decoder)
        def patch_create_model(encoder, decoder, total_cells):
            assert encoder == "Encoder"
            assert decoder == "Decoder"
            assert total_cells == 0
            return "Model"
        monkeypatch.setattr(utilities_wae, "create_model", patch_create_model)
        monkeypatch.setattr(batch_integration.DISCERN, "get_optimizer",
                            lambda self: "Optimizer")
        def patch_compile(_, optimizer, scale):
            assert scale == 15000
            assert optimizer == "Optimizer"
        monkeypatch.setattr(batch_integration.DISCERN, "compile", patch_compile)
        default_model.build_model(n_genes=100, n_labels=2, scale=0)
        assert default_model.wae_model == "Model"
    @pytest.mark.parametrize("with_decay", [True, False])
    @pytest.mark.parametrize("with_lookahead", [True, False])
    @pytest.mark.parametrize("algo", ["Adam", 'Adagrad'])
    def test_get_optimizer(self, default_model, with_decay, with_lookahead,
                           algo):
        """Test optimizer creation."""
        algo = "tensorflow.keras.optimizers." + algo
        config = {
            "learning_rate": 0.1,
            "algorithm": algo,
            "epsilon": 1e-08,
        }
        if with_decay:
            config["learning_decay"] = dict(
                name="tensorflow.keras.optimizers.schedules.ExponentialDecay",
                decay_steps=1,
                decay_rate=0.2)
        if with_lookahead:
            config["Lookahead"] = True
        default_model.optimizer_config = config
        got = default_model.get_optimizer()
        if with_lookahead:
            # Lookahead wraps the base optimizer; unwrap before type checks.
            assert isinstance(got,
                              tensorflow_addons.optimizers.lookahead.Lookahead)
            got = got._optimizer  # pylint: disable=protected-access
        if algo.endswith('Adam'):
            assert isinstance(got, tf.keras.optimizers.Adam)
        elif algo.endswith('Adagrad'):
            assert isinstance(got, tf.keras.optimizers.Adagrad)
        else:
            raise AssertionError("Invalid config")
        got = got.get_config()
        assert got['epsilon'] == config['epsilon']
        if with_decay:
            # With a decay schedule the learning rate is serialized as a
            # nested schedule config instead of a plain float.
            assert got["learning_rate"] == {
                'class_name': 'ExponentialDecay',
                'config': {
                    'decay_rate': 0.2,
                    'decay_steps': 1,
                    'initial_learning_rate': 0.1,
                    'name': None,
                    'staircase': False
                }
            }
        else:
            assert got['learning_rate'] == config['learning_rate']
    def test_compile(self, default_model, monkeypatch):
        """Test compiling model."""
        def patch_reconstruction_loss(losstype):
            assert losstype == default_model.recon_loss_type
            return "mse"
        monkeypatch.setattr(losses, "reconstruction_loss",
                            patch_reconstruction_loss)
        default_model.build_model(n_genes=100, n_labels=2, scale=0)
        default_model.compile("Adam")
        model = default_model.wae_model
        assert model._is_compiled  # pylint: disable=protected-access
        assert isinstance(model.optimizer, tf.keras.optimizers.Adam)
        # Four loss heads: counts, dropouts, sigma regularization and mmdpp.
        assert len(model.loss) == 4
        assert isinstance(model.loss['decoder_dropouts'],
                          losses.MaskedCrossEntropy)
        assert model.loss['decoder_counts'] == "mse"
        assert isinstance(model.loss['sigma_regularization'], losses.DummyLoss)
        assert isinstance(model.loss['mmdpp'], losses.DummyLoss)
        assert model.loss_weights == {
            "decoder_counts": 15000.0,
            "decoder_dropouts": default_model.weighting_decoder_dropout,
            "sigma_regularization": default_model.weighting_random_encoder,
            "mmdpp": default_model.wae_lambda
        }
        assert len(model.metrics) == 0
    @pytest.mark.parametrize("savepath", (True, False))
    def test_training(self, default_model, monkeypatch, savepath):
        """Test training function without performing actual training."""
        exp_batchsize = 10
        exp_maxstep = 1
        # Report log level 20 (INFO); patch_fit below expects verbose=1.
        monkeypatch.setattr(
            batch_integration._LOGGER,  # pylint: disable=protected-access
            'getEffectiveLevel',
            lambda: 20)
        default_model.build_model(n_genes=100, n_labels=2, scale=0)
        class _PatchDISCERNData:
            # Train data is all zeros, validation all ones, so patch_fit
            # can tell the two datasets apart.
            def __init__(self):
                traindataset = tf.data.Dataset.from_tensor_slices(np.zeros(10))
                validdataset = tf.data.Dataset.from_tensor_slices(np.ones(10))
                self.tfdata = traindataset, validdataset
                self.batch_size = exp_batchsize
                self.config = {"total_train_count": 10}
        def patch_fit(x, epochs, validation_data, verbose, callbacks,
                      initial_epoch):
            # pylint: disable=too-many-arguments, invalid-name
            assert isinstance(x, tf.data.Dataset)
            for val in x:
                assert val == 0.
            assert isinstance(validation_data, tf.data.Dataset)
            for val in validation_data:
                assert val == 1.
            assert epochs == exp_maxstep
            assert verbose == 1
            assert callbacks == "Callbacks"
            assert initial_epoch == 0.
            return "Result"
        def _check_save(*_, **unused_kwargs):
            # The model must only be saved when a savepath was given.
            assert savepath
        monkeypatch.setattr(default_model.wae_model, "fit", patch_fit)
        monkeypatch.setattr(default_model.wae_model, "save", _check_save)
        got = default_model.training(savepath=savepath if savepath else None,
                                     inputdata=_PatchDISCERNData(),
                                     max_steps=exp_maxstep,
                                     callbacks="Callbacks")
        assert got == "Result"
    def test_generate_latent_codes(self, default_model):
        """Test generation of latent codes."""
        exp_batchsize = 1
        counts = np.random.uniform(0, 4, 20).reshape(10, 2) - 2
        labels = np.ones(10)[:, np.newaxis]
        exp_counts = counts.copy()
        class PatchEncoder:
            """Patch for don't using real encoder."""
            # pylint: disable=too-few-public-methods
            def predict(self, dataset, batch_size):
                """Predict test."""
                assert batch_size == exp_batchsize
                assert len(dataset) == 2
                assert isinstance(dataset['encoder_labels'], tf.Tensor)
                for val in dataset['encoder_labels']:
                    np.testing.assert_allclose(val, np.ones((1, )))
                assert isinstance(dataset['encoder_input'], tf.Tensor)
                for i, val in enumerate(dataset['encoder_input']):
                    np.testing.assert_allclose(val, exp_counts[i])
        class PatchModel:
            """Patch for don't using real model."""
            # pylint: disable=too-few-public-methods
            def get_layer(self, layername):
                """Get layer patched."""
                if layername == "encoder":
                    return PatchEncoder()
                raise AssertionError('Invalid layer')
        default_model.wae_model = PatchModel()
        default_model.generate_latent_codes(counts=counts,
                                            batch_labels=labels,
                                            batch_size=exp_batchsize)
    def test_generate_cells_from_latent(self, default_model):
        """Test generation of cells."""
        exp_batchsize = 1
        latent = np.random.rand(10, 2)
        labels = np.ones(10)[:, np.newaxis]
        class PatchDecoder:
            """Patch for don't using real decoder."""
            # pylint: disable=too-few-public-methods
            def predict(self, dataset, batch_size):
                """Predict test."""
                assert batch_size == exp_batchsize
                assert len(dataset) == 2
                assert isinstance(dataset['decoder_labels'], tf.Tensor)
                for val in dataset['decoder_labels']:
                    np.testing.assert_allclose(val, np.ones((1, )))
                assert isinstance(dataset['decoder_input'], tf.Tensor)
                for i, val in enumerate(dataset['decoder_input']):
                    np.testing.assert_allclose(val, latent[i])
        class PatchModel:
            """Patch for don't using real model."""
            # pylint: disable=too-few-public-methods
            def get_layer(self, layername):
                """Get layer patched."""
                if layername == "decoder":
                    return PatchDecoder()
                raise AssertionError('Invalid layer')
        default_model.wae_model = PatchModel()
        default_model.generate_cells_from_latent(latent_codes=latent,
                                                 output_batch_labels=labels,
                                                 batch_size=exp_batchsize)
    @pytest.mark.parametrize("inputs",
                             [
                                 dict(metadata=[("batch", "batch1"),
                                                ("batch", "batch2"),
                                                ("batch", None),
                                                ("metadata", "type1"),
                                                ("metadata", "type2"),
                                                ("metadata", None)],
                                      is_scaled=False,
                                      exception=no_raise(),
                                      exp_frequencies=[
                                          {
                                              "batch1": [1., 0.],
                                              "batch2": [1., 0.],
                                          },
                                          {
                                              "batch1": [0., 1.],
                                              "batch2": [0., 1.],
                                          },
                                          {
                                              "batch1": [0.4, 0.6],
                                              "batch2": [0.4, 0.6],
                                          },
                                          {
                                              "type1": [1.0, 0.0],
                                              "type2": [1.0, 0.0],
                                          },
                                          {
                                              "type1": [0.25, 0.75],
                                              "type2": [0.25, 0.75],
                                          },
                                          {
                                              "type1": [1., 0.0],
                                              "type2": [0.25, 0.75],
                                          },
                                      ]),
                                 dict(metadata=[("batch", "batch1"),
                                                ("batch", "batch2"),
                                                ("batch", None),
                                                ("metadata", "type1"),
                                                ("metadata", "type2"),
                                                ("metadata", None)],
                                      is_scaled=True,
                                      exception=no_raise(),
                                      exp_frequencies=[
                                          {
                                              "batch1": [1., 0.],
                                              "batch2": [1., 0.],
                                          },
                                          {
                                              "batch1": [0., 1.],
                                              "batch2": [0., 1.],
                                          },
                                          {
                                              "batch1": [0.4, 0.6],
                                              "batch2": [0.4, 0.6],
                                          },
                                          {
                                              "type1": [1.0, 0.0],
                                              "type2": [1.0, 0.0],
                                          },
                                          {
                                              "type1": [0.25, 0.75],
                                              "type2": [0.25, 0.75],
                                          },
                                          {
                                              "type1": [1., 0.0],
                                              "type2": [0.25, 0.75],
                                          },
                                      ]),
                                 dict(metadata=[("batch", )],
                                      is_scaled=False,
                                      exception=pytest.raises(ValueError),
                                      exp_frequencies=[]),
                                 dict(metadata=[("invalid_column", "batch1")],
                                      is_scaled=False,
                                      exception=pytest.raises(KeyError),
                                      exp_frequencies=[]),
                                 dict(metadata=[("metadata", "invalid_value")],
                                      is_scaled=False,
                                      exception=pytest.raises(ValueError),
                                      exp_frequencies=[]),
                             ])
    def test_project_to_metadata(self, monkeypatch, tmp_path, anndata_file,
                                 default_model, inputs):
        """Test project_to_metadata."""
        # pylint: disable=too-many-arguments, too-many-locals, too-many-statements
        exp_batchsize = 10
        anndata_file = io.DISCERNData(anndata_file(100), batch_size=10)
        anndata_file.uns.pop("fixed_scaling", None)
        exp_threshold = 0.0
        if inputs["is_scaled"]:
            exp_threshold = -np.inf
            anndata_file.uns["fixed_scaling"] = {}
        anndata_file.obs["metadata"] = ["type1"] * 20 + ["type2"] * 80
        batches = ["batch1"] * 20 + ["batch2"] * 30
        batches += ["batch1"] * 20 + ["batch2"] * 30
        anndata_file.obs["batch"] = batches
        anndata_file.obs["batch"] = anndata_file.obs.batch.astype("category")
        default_model.build_model(n_genes=100, n_labels=2, scale=0)
        # Patch all three pipeline stages (encode, decode, write) so no
        # real model inference or file I/O runs.
        def patch_generate_latent_codes(data, labels, batchsize):
            np.testing.assert_equal(data, anndata_file.X)
            assert labels.shape == (100, 2)
            labels = labels.argmax(axis=1)
            assert (labels == anndata_file.obs["batch"].cat.codes).all()
            assert batchsize == exp_batchsize
            return "latent", None
        monkeypatch.setattr(default_model, "generate_latent_codes",
                            patch_generate_latent_codes)
        metadata = inputs["metadata"].copy()
        second_metadata_check = metadata.copy()
        exp_frequencies = inputs.pop("exp_frequencies")
        def patch_generate_cells_from_latent(latent, labels, batchsize):
            assert latent == "latent"
            assert batchsize == exp_batchsize
            curr_col, curr_val = metadata.pop(0)
            if curr_val == "invalid_value":
                return (curr_col, curr_val)
            got_freq = pd.DataFrame(
                labels, columns=anndata_file.obs.batch.cat.categories)
            got_freq[curr_col] = anndata_file.obs[curr_col].reset_index(
                drop=True)
            got_freq.drop_duplicates(inplace=True)
            got_freq.set_index(curr_col, inplace=True)
            exp_freq = pd.DataFrame.from_dict(
                exp_frequencies.pop(0),
                orient="index",
                columns=anndata_file.obs.batch.cat.categories)
            pd.testing.assert_frame_equal(got_freq,
                                          exp_freq,
                                          check_index_type=False,
                                          check_column_type=False,
                                          check_categorical=False,
                                          check_dtype=False,
                                          check_names=False)
            return (curr_col, curr_val)
        monkeypatch.setattr(default_model, "generate_cells_from_latent",
                            patch_generate_cells_from_latent)
        def patch_generate_h5ad(counts, threshold, save_path, var, obs, uns,
                                obsm):
            # pylint: disable=too-many-arguments
            assert threshold == exp_threshold
            assert (var == anndata_file.var).all(axis=None)
            assert (obs == anndata_file.obs).all(axis=None)
            assert uns == anndata_file.uns
            assert obsm["X_DISCERN"] == "latent"
            curr_metadata = second_metadata_check.pop(0)
            assert counts == curr_metadata
            exp_save_path = str(
                pathlib.Path(tmp_path,
                             "projected_to_average_{}".format(counts[0])))
            if counts[1]:
                exp_save_path += "_{}".format(counts[1])
            exp_save_path += ".h5ad"
            assert save_path == pathlib.Path(exp_save_path)
        monkeypatch.setattr(io, "generate_h5ad", patch_generate_h5ad)
        with inputs["exception"]:
            default_model.project_to_metadata(input_data=anndata_file,
                                              metadata=inputs["metadata"],
                                              save_path=tmp_path)
            # Every metadata entry must have been consumed by the patches.
            assert len(metadata) == 0
            assert len(second_metadata_check) == 0
| [
"pandas.DataFrame",
"numpy.random.uniform",
"pandas.testing.assert_frame_equal",
"json.load",
"tensorflow.keras.backend.clear_session",
"numpy.testing.assert_allclose",
"numpy.zeros",
"numpy.ones",
"contextlib.ExitStack",
"tensorflow.keras.Model",
"discern.estimators.batch_integration.DISCERN.fr... | [((2354, 2407), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""is_compiled"""', '[True, False]'], {}), "('is_compiled', [True, False])\n", (2377, 2407), False, 'import pytest\n'), ((6573, 6625), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""with_decay"""', '[True, False]'], {}), "('with_decay', [True, False])\n", (6596, 6625), False, 'import pytest\n'), ((6631, 6687), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""with_lookahead"""', '[True, False]'], {}), "('with_lookahead', [True, False])\n", (6654, 6687), False, 'import pytest\n'), ((6693, 6745), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""algo"""', "['Adam', 'Adagrad']"], {}), "('algo', ['Adam', 'Adagrad'])\n", (6716, 6745), False, 'import pytest\n'), ((9759, 9809), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""savepath"""', '(True, False)'], {}), "('savepath', (True, False))\n", (9782, 9809), False, 'import pytest\n'), ((594, 618), 'pathlib.Path', 'pathlib.Path', (['parameters'], {}), '(parameters)\n', (606, 618), False, 'import pathlib\n'), ((722, 769), 'discern.estimators.batch_integration.DISCERN.from_json', 'batch_integration.DISCERN.from_json', (['parameters'], {}), '(parameters)\n', (757, 769), False, 'from discern.estimators import batch_integration\n'), ((1241, 1273), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (1271, 1273), True, 'import tensorflow as tf\n'), ((1994, 2026), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (2024, 2026), True, 'import tensorflow as tf\n'), ((2532, 2564), 'tensorflow.keras.backend.clear_session', 'tf.keras.backend.clear_session', ([], {}), '()\n', (2562, 2564), True, 'import tensorflow as tf\n'), ((13473, 13494), 'numpy.random.rand', 'np.random.rand', (['(10)', '(2)'], {}), '(10, 2)\n', (13487, 13494), True, 'import numpy as np\n'), ((692, 707), 
'json.load', 'json.load', (['file'], {}), '(file)\n', (701, 707), False, 'import json\n'), ((2684, 2700), 'tensorflow.keras.Model', 'tf.keras.Model', ([], {}), '()\n', (2698, 2700), True, 'import tensorflow as tf\n'), ((11950, 11961), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (11957, 11961), True, 'import numpy as np\n'), ((13512, 13523), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (13519, 13523), True, 'import numpy as np\n'), ((20702, 20747), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['data', 'anndata_file.X'], {}), '(data, anndata_file.X)\n', (20725, 20747), True, 'import numpy as np\n'), ((21583, 21650), 'pandas.DataFrame', 'pd.DataFrame', (['labels'], {'columns': 'anndata_file.obs.batch.cat.categories'}), '(labels, columns=anndata_file.obs.batch.cat.categories)\n', (21595, 21650), True, 'import pandas as pd\n'), ((22068, 22237), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['got_freq', 'exp_freq'], {'check_index_type': '(False)', 'check_column_type': '(False)', 'check_categorical': '(False)', 'check_dtype': '(False)', 'check_names': '(False)'}), '(got_freq, exp_freq, check_index_type=False,\n check_column_type=False, check_categorical=False, check_dtype=False,\n check_names=False)\n', (22097, 22237), True, 'import pandas as pd\n'), ((953, 982), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (966, 982), False, 'import pytest\n'), ((1029, 1058), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (1042, 1058), False, 'import pytest\n'), ((1104, 1114), 'contextlib.ExitStack', 'no_raise', ([], {}), '()\n', (1112, 1114), True, 'from contextlib import ExitStack as no_raise\n'), ((1706, 1735), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (1719, 1735), False, 'import pytest\n'), ((1782, 1811), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (1795, 1811), False, 'import 
pytest\n'), ((1857, 1867), 'contextlib.ExitStack', 'no_raise', ([], {}), '()\n', (1865, 1867), True, 'from contextlib import ExitStack as no_raise\n'), ((3177, 3206), 'tensorflow.keras.optimizers.Adagrad', 'tf.keras.optimizers.Adagrad', ([], {}), '()\n', (3204, 3206), True, 'import tensorflow as tf\n'), ((23481, 23508), 'pathlib.Path', 'pathlib.Path', (['exp_save_path'], {}), '(exp_save_path)\n', (23493, 23508), False, 'import pathlib\n'), ((10363, 10375), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (10371, 10375), True, 'import numpy as np\n'), ((10443, 10454), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (10450, 10454), True, 'import numpy as np\n'), ((11886, 11913), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(4)', '(20)'], {}), '(0, 4, 20)\n', (11903, 11913), True, 'import numpy as np\n'), ((12681, 12727), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['val', 'exp_counts[i]'], {}), '(val, exp_counts[i])\n', (12707, 12727), True, 'import numpy as np\n'), ((14208, 14250), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['val', 'latent[i]'], {}), '(val, latent[i])\n', (14234, 14250), True, 'import numpy as np\n'), ((15466, 15476), 'contextlib.ExitStack', 'no_raise', ([], {}), '()\n', (15474, 15476), True, 'from contextlib import ExitStack as no_raise\n'), ((17433, 17443), 'contextlib.ExitStack', 'no_raise', ([], {}), '()\n', (17441, 17443), True, 'from contextlib import ExitStack as no_raise\n'), ((19049, 19074), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (19062, 19074), False, 'import pytest\n'), ((19317, 19340), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (19330, 19340), False, 'import pytest\n'), ((19584, 19609), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (19597, 19609), False, 'import pytest\n'), ((12507, 12520), 'numpy.ones', 'np.ones', (['(1,)'], {}), '((1,))\n', (12514, 12520), True, 'import numpy as np\n'), 
((14034, 14047), 'numpy.ones', 'np.ones', (['(1,)'], {}), '((1,))\n', (14041, 14047), True, 'import numpy as np\n')] |
from argparse import ArgumentParser
import pandas as pd
import numpy as np
from fyne import blackscholes, heston
import matplotlib.pyplot as plt
def _years_to_expiry(date, expiry):
return (expiry - date)/pd.to_timedelta('365d')
def plot(underlying, ivs, params, date):
    """Plot market vs. Heston-model implied vols, one subplot per expiry.

    Args:
        underlying: pd.Series of underlying prices indexed by time.
        ivs: DataFrame of implied vols with a (Time, Strike, Expiry) style
            MultiIndex and call/put columns.  # assumes index levels named
            # 'Time', 'Strike', 'Expiry' — TODO confirm against caller.
        params: Heston parameters unpacked into ``heston.formula``.
        date: trading date; the 12:15 snapshot of that day is plotted.

    Returns:
        The matplotlib Figure with one axis per expiry.
    """
    # Fix a single intraday snapshot to plot.
    time = pd.to_datetime(f"{date} 12:15:00")
    ivs.columns.name = 'Side'
    # One group (subplot) per expiry at the chosen snapshot.
    groups = ivs.xs(time, level='Time').stack().groupby('Expiry')
    fig, axs = plt.subplots(len(groups), sharex=True, figsize=(8, 10))
    # Common strike grid spanning all observed strikes.
    strike_min = np.min(ivs.index.get_level_values('Strike').values)
    strike_max = np.max(ivs.index.get_level_values('Strike').values)
    strike_grid = np.linspace(strike_min, strike_max, 20)
    for ax, (e, g) in zip(axs, groups):
        g.index = g.index.droplevel(['Expiry', 'Side'])
        # Market implied vols: calls and puts as separate scatter-like series.
        g.xs('C').plot(ax=ax, linewidth=0, marker='_', markersize=3)
        g.xs('P').plot(ax=ax, linewidth=0, marker='_', markersize=3, color='g')
        # Model curve: Heston prices inverted through Black-Scholes.
        heston_prices = heston.formula(underlying.loc[time], strike_grid,
                                       _years_to_expiry(date, e), *params)
        heston_ivs = pd.Series(
            blackscholes.implied_vol(underlying.loc[time], strike_grid,
                                     _years_to_expiry(date, e), heston_prices),
            strike_grid)
        heston_ivs.plot(ax=ax, color='gray').set_ylabel('Implied volatility')
        ax.set_title("Expiry: {}".format(e.strftime('%Y-%m-%d')))
    return fig
if __name__ == '__main__':
    # All five command-line arguments are positional, in this order.
    parser = ArgumentParser()
    for arg_name in ('date', 'underlying_filename', 'ivs_filename',
                     'params_filename', 'dest_filename'):
        parser.add_argument(arg_name)
    args = parser.parse_args()
    # Collapse the underlying quotes to a single price series.
    underlying = pd.read_parquet(args.underlying_filename).mean(axis=1)
    ivs = pd.read_parquet(args.ivs_filename)
    date = pd.to_datetime(args.date)
    params = pd.read_parquet(args.params_filename)['Value']
    fig = plot(underlying, ivs, params, date)
    fig.savefig(args.dest_filename)
| [
"argparse.ArgumentParser",
"pandas.to_timedelta",
"pandas.to_datetime",
"pandas.read_parquet",
"numpy.linspace"
] | [((290, 324), 'pandas.to_datetime', 'pd.to_datetime', (['f"""{date} 12:15:00"""'], {}), "(f'{date} 12:15:00')\n", (304, 324), True, 'import pandas as pd\n'), ((650, 689), 'numpy.linspace', 'np.linspace', (['strike_min', 'strike_max', '(20)'], {}), '(strike_min, strike_max, 20)\n', (661, 689), True, 'import numpy as np\n'), ((1496, 1512), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (1510, 1512), False, 'from argparse import ArgumentParser\n'), ((1812, 1846), 'pandas.read_parquet', 'pd.read_parquet', (['args.ivs_filename'], {}), '(args.ivs_filename)\n', (1827, 1846), True, 'import pandas as pd\n'), ((1858, 1883), 'pandas.to_datetime', 'pd.to_datetime', (['args.date'], {}), '(args.date)\n', (1872, 1883), True, 'import pandas as pd\n'), ((212, 235), 'pandas.to_timedelta', 'pd.to_timedelta', (['"""365d"""'], {}), "('365d')\n", (227, 235), True, 'import pandas as pd\n'), ((1897, 1934), 'pandas.read_parquet', 'pd.read_parquet', (['args.params_filename'], {}), '(args.params_filename)\n', (1912, 1934), True, 'import pandas as pd\n'), ((1747, 1788), 'pandas.read_parquet', 'pd.read_parquet', (['args.underlying_filename'], {}), '(args.underlying_filename)\n', (1762, 1788), True, 'import pandas as pd\n')] |
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from datetime import datetime
from datagen import genheatmap
from model import build_model
# Evaluation script: run the trained model over the test set and show the
# input image next to its predicted heatmap, one example at a time.
np.set_printoptions(threshold=np.inf, linewidth=np.inf)
test_anno_file_path = '../datasets/wider_face/full_test_anno.txt'
test_img_dir_path = '../datasets/wider_face/full_test_images'
output_path = 'output'
# Model input shape: grayscale 448x448.
ishape = [448, 448, 1]
total_test_examples = 3164 # 3164
batch_size = 1
total_test_batches = total_test_examples//batch_size
model = build_model(ishape=ishape, mode='test')
# model.summary()
model.load_weights('{}/weights.h5'.format(output_path), by_name=True)
# Generator yielding (inputs, labels); labels are unused here.
gen = genheatmap(
	anno_file_path=test_anno_file_path,
	img_dir_path=test_img_dir_path,
	ishape=ishape,
	total_batches=total_test_examples,
	batch_size=batch_size)
for _ in range(total_test_batches):
	batchx4d, _ = next(gen)
	# Time a single forward pass.
	print('Start: {}'.format(datetime.now().time()))
	prediction = model.predict_on_batch(batchx4d) # (batch_size, h, w, 5)
	print('End: {}'.format(datetime.now().time()))
	heatmap4d = prediction.numpy()
	# Subtract the (rescaled) input so only the predicted response remains.
	heatmap4d -= batchx4d/255
	# Keep only the first output channel as the displayed heatmap.
	heatmap3d = heatmap4d[:, :, :, 0]
	for i in range(batch_size):
		hm12d = heatmap3d[i]
		# hm12d = hm12d/np.max(hm12d)
		# hm12d = np.where(hm12d > 0.8, hm12d, 0)
		# Side-by-side view: input image (left) and heatmap (right).
		_, ax = plt.subplots(1, 2, figsize=(15, 7.35))
		ax[0].imshow(batchx4d[i, :, :, 0]/255, vmin=0, vmax=1)
		ax[1].imshow(hm12d, vmin=0, vmax=1)
		plt.show()
| [
"numpy.set_printoptions",
"matplotlib.pyplot.show",
"datagen.genheatmap",
"model.build_model",
"datetime.datetime.now",
"matplotlib.pyplot.subplots"
] | [((168, 223), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.inf', 'linewidth': 'np.inf'}), '(threshold=np.inf, linewidth=np.inf)\n', (187, 223), True, 'import numpy as np\n'), ((510, 549), 'model.build_model', 'build_model', ([], {'ishape': 'ishape', 'mode': '"""test"""'}), "(ishape=ishape, mode='test')\n", (521, 549), False, 'from model import build_model\n'), ((645, 805), 'datagen.genheatmap', 'genheatmap', ([], {'anno_file_path': 'test_anno_file_path', 'img_dir_path': 'test_img_dir_path', 'ishape': 'ishape', 'total_batches': 'total_test_examples', 'batch_size': 'batch_size'}), '(anno_file_path=test_anno_file_path, img_dir_path=\n test_img_dir_path, ishape=ishape, total_batches=total_test_examples,\n batch_size=batch_size)\n', (655, 805), False, 'from datagen import genheatmap\n'), ((1273, 1311), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(15, 7.35)'}), '(1, 2, figsize=(15, 7.35))\n', (1285, 1311), True, 'import matplotlib.pyplot as plt\n'), ((1409, 1419), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1417, 1419), True, 'import matplotlib.pyplot as plt\n'), ((895, 909), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (907, 909), False, 'from datetime import datetime\n'), ((1014, 1028), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1026, 1028), False, 'from datetime import datetime\n')] |
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import time
import tensorflow as tf
def run_model(sess, X, y, is_training, predict, loss_val,
              Xd, yd,
              epochs=1, batch_size=64, print_every=100,
              training=None, plot_losses=False, learning_rate=None,
              learning_rate_value=10e-3, part_of_dataset=1.0,
              snapshot_name=None,
              ):
    """Train or evaluate a TF1 graph-mode model over ``Xd``/``yd``.

    Args:
        sess: active tf.Session.
        X, y, is_training: placeholders fed per batch.
        predict: logits tensor; the class is argmax over axis 1.
        loss_val: scalar loss tensor.
        Xd, yd: numpy arrays holding the full dataset.
        epochs: number of passes over the data.
        batch_size: minibatch size.
        print_every: iteration interval for progress logging.
        training: optional training op; when given, a train step runs on
            every batch (the accuracy op is not evaluated in that mode).
        plot_losses: if True, plot per-batch losses after each epoch.
        learning_rate: optional learning-rate placeholder.
        learning_rate_value: float (constant) or list (one value per epoch)
            fed into ``learning_rate``.
        part_of_dataset: fraction of the dataset used per epoch.
        snapshot_name: if given, weights are restored from and saved to
            ./snapshots/<snapshot_name>/.

    Returns:
        (total_loss, total_correct, all_losses, all_correct) — epoch totals
        from the last epoch plus per-batch histories for the whole run.

    Raises:
        TypeError: if ``learning_rate`` is given but ``learning_rate_value``
            is neither a float nor a list.
    """
    # Validate up front instead of crashing mid-epoch.  The original code
    # raised an undefined name `Error` here (a NameError in practice).
    if learning_rate is not None and not isinstance(learning_rate_value, (float, list)):
        raise TypeError('unsupported learning_rate_value, valid types are list or float')

    # Accuracy bookkeeping ops.
    correct_prediction = tf.equal(tf.argmax(predict, 1), y)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Shuffle the sample order once for the whole run.
    train_indicies = np.arange(Xd.shape[0])
    np.random.shuffle(train_indicies)

    training_now = training is not None
    # Ops evaluated per batch; in training mode the accuracy slot is
    # replaced by the training op (its result is discarded below).
    variables = [loss_val, correct_prediction, accuracy]
    if training_now:
        variables[-1] = training

    all_losses = []
    all_correct = []
    iter_cnt = 0

    saver = tf.train.Saver()
    snapshot_filename = None
    if snapshot_name:
        snapshot_filename = f'./snapshots/{snapshot_name}/model'
        snapshot_dir = f'./snapshots/{snapshot_name}'
        try:
            saver.restore(sess, snapshot_filename)
            print(f'restored snapshot {snapshot_filename}')
        except tf.errors.InvalidArgumentError:
            # No snapshot on disk yet — start from the current weights.
            print(f'haven\'t restore snapshot {snapshot_filename}')

    for e in range(epochs):
        # Per-epoch accumulators.
        correct = 0
        losses = []
        num_batches = int(math.ceil(Xd.shape[0] / batch_size * part_of_dataset))
        for i in range(num_batches):
            # Wrap around the dataset so partial epochs still index validly.
            start_idx = (i * batch_size) % Xd.shape[0]
            idx = train_indicies[start_idx:start_idx + batch_size]

            feed_dict = {
                X: Xd[idx, :],
                y: yd[idx],
                is_training: training_now,
            }
            if learning_rate is not None:
                if isinstance(learning_rate_value, float):
                    feed_dict[learning_rate] = learning_rate_value
                else:
                    # A list was validated above: one value per epoch.
                    feed_dict[learning_rate] = learning_rate_value[e]

            # The last batch may be smaller than batch_size.
            actual_batch_size = yd[idx].shape[0]

            # Compute loss and correct predictions (and train, if enabled).
            loss, corr, _ = sess.run(variables, feed_dict=feed_dict)

            # Weight the loss by batch size so the epoch mean is exact.
            losses.append(loss * actual_batch_size)
            correct += np.sum(corr)
            all_correct.append(np.sum(corr) / actual_batch_size)
            all_losses.append(loss)

            if training_now and (iter_cnt % print_every) == 0:
                print("Iteration {0}: with minibatch training loss = {1:.3g} and accuracy of {2:.2g}"
                      .format(iter_cnt, loss, np.sum(corr) / actual_batch_size))
            iter_cnt += 1

        total_correct = correct / Xd.shape[0]
        total_loss = np.sum(losses) / Xd.shape[0]
        print("Epoch {2}, Overall loss = {0:.3g} and accuracy of {1:.3g}"
              .format(total_loss, total_correct, e + 1))
        if plot_losses:
            plt.plot(losses)
            plt.grid(True)
            plt.title('Epoch {} Loss'.format(e + 1))
            plt.xlabel('minibatch number')
            plt.ylabel('minibatch loss')
            plt.show()

    # Guard on truthiness (not just `is not None`) so an empty snapshot
    # name never reaches the unbound `snapshot_dir`.
    if training_now and snapshot_name:
        # exist_ok avoids a race between the check and the mkdir.
        os.makedirs(snapshot_dir, exist_ok=True)
        save_path = saver.save(sess, snapshot_filename)
        print(f'Model saved in path: {save_path}')

    return total_loss, total_correct, all_losses, all_correct
| [
"numpy.sum",
"matplotlib.pyplot.show",
"tensorflow.train.Saver",
"matplotlib.pyplot.plot",
"tensorflow.argmax",
"math.ceil",
"os.makedirs",
"os.path.exists",
"tensorflow.cast",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid",
"numpy.random.s... | [((629, 651), 'numpy.arange', 'np.arange', (['Xd.shape[0]'], {}), '(Xd.shape[0])\n', (638, 651), True, 'import numpy as np\n'), ((656, 689), 'numpy.random.shuffle', 'np.random.shuffle', (['train_indicies'], {}), '(train_indicies)\n', (673, 689), True, 'import numpy as np\n'), ((1060, 1076), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1074, 1076), True, 'import tensorflow as tf\n'), ((487, 508), 'tensorflow.argmax', 'tf.argmax', (['predict', '(1)'], {}), '(predict, 1)\n', (496, 508), True, 'import tensorflow as tf\n'), ((543, 582), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (550, 582), True, 'import tensorflow as tf\n'), ((3156, 3168), 'numpy.sum', 'np.sum', (['corr'], {}), '(corr)\n', (3162, 3168), True, 'import numpy as np\n'), ((3652, 3666), 'numpy.sum', 'np.sum', (['losses'], {}), '(losses)\n', (3658, 3666), True, 'import numpy as np\n'), ((3850, 3866), 'matplotlib.pyplot.plot', 'plt.plot', (['losses'], {}), '(losses)\n', (3858, 3866), True, 'import matplotlib.pyplot as plt\n'), ((3879, 3893), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3887, 3893), True, 'import matplotlib.pyplot as plt\n'), ((3959, 3989), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""minibatch number"""'], {}), "('minibatch number')\n", (3969, 3989), True, 'import matplotlib.pyplot as plt\n'), ((4002, 4030), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""minibatch loss"""'], {}), "('minibatch loss')\n", (4012, 4030), True, 'import matplotlib.pyplot as plt\n'), ((4043, 4053), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4051, 4053), True, 'import matplotlib.pyplot as plt\n'), ((1726, 1779), 'math.ceil', 'math.ceil', (['(Xd.shape[0] / batch_size * part_of_dataset)'], {}), '(Xd.shape[0] / batch_size * part_of_dataset)\n', (1735, 1779), False, 'import math\n'), ((4129, 4157), 'os.path.exists', 'os.path.exists', (['snapshot_dir'], {}), 
'(snapshot_dir)\n', (4143, 4157), False, 'import os\n'), ((4175, 4200), 'os.makedirs', 'os.makedirs', (['snapshot_dir'], {}), '(snapshot_dir)\n', (4186, 4200), False, 'import os\n'), ((3201, 3213), 'numpy.sum', 'np.sum', (['corr'], {}), '(corr)\n', (3207, 3213), True, 'import numpy as np\n'), ((3524, 3536), 'numpy.sum', 'np.sum', (['corr'], {}), '(corr)\n', (3530, 3536), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np

# Which track's waypoint file to load and visualise.
track_name = "Canada_Training"
absolute_path = "."

waypoints = np.load("%s/%s.npy" % (absolute_path, track_name))
print("Number of waypoints = " + str(waypoints.shape[0]))

# Columns 2 and 3 hold the plotted coordinates of each waypoint.
for idx, row in enumerate(waypoints):
    coords = (row[2], row[3])
    plt.scatter(coords[0], coords[1])
    print("Waypoint " + str(idx) + ": " + str(coords))

plt.show()
| [
"matplotlib.pyplot.scatter",
"numpy.load",
"matplotlib.pyplot.show"
] | [((120, 170), 'numpy.load', 'np.load', (["('%s/%s.npy' % (absolute_path, track_name))"], {}), "('%s/%s.npy' % (absolute_path, track_name))\n", (127, 170), True, 'import numpy as np\n'), ((410, 420), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (418, 420), True, 'import matplotlib.pyplot as plt\n'), ((313, 350), 'matplotlib.pyplot.scatter', 'plt.scatter', (['waypoint[0]', 'waypoint[1]'], {}), '(waypoint[0], waypoint[1])\n', (324, 350), True, 'import matplotlib.pyplot as plt\n')] |
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the parallel_py_environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import numpy as np
import tensorflow as tf
from tf_agents.environments import parallel_py_environment
from tf_agents.environments import random_py_environment
from tf_agents.environments import time_step as ts
from tf_agents.specs import array_spec
class ParallelPyEnvironmentTest(tf.test.TestCase):
  """Tests for ParallelPyEnvironment (batched, blocking sub-environments)."""

  def _make_parallel_py_environment(self, constructor=None, num_envs=2):
    # Build `num_envs` identical random environments and batch them.
    # Specs are stashed on self so individual tests can compare against them.
    self.observation_spec = array_spec.ArraySpec((3, 3), np.float32)
    self.time_step_spec = ts.time_step_spec(self.observation_spec)
    self.action_spec = array_spec.BoundedArraySpec(
        [7], dtype=np.float32, minimum=-1.0, maximum=1.0)
    constructor = constructor or functools.partial(
        random_py_environment.RandomPyEnvironment,
        self.observation_spec,
        self.action_spec)
    return parallel_py_environment.ParallelPyEnvironment(
        env_constructors=[constructor] * num_envs, blocking=True)

  def test_close_no_hang_after_init(self):
    # Closing immediately after construction must not deadlock.
    env = self._make_parallel_py_environment()
    env.close()

  def test_get_specs(self):
    # Batched env reports the same (unbatched) specs as its children.
    env = self._make_parallel_py_environment()
    self.assertEqual(self.observation_spec, env.observation_spec())
    self.assertEqual(self.time_step_spec, env.time_step_spec())
    self.assertEqual(self.action_spec, env.action_spec())
    env.close()

  def test_step(self):
    num_envs = 2
    env = self._make_parallel_py_environment(num_envs=num_envs)
    action_spec = env.action_spec()
    observation_spec = env.observation_spec()
    rng = np.random.RandomState()
    # One sampled action per sub-environment, stacked on axis 0.
    action = np.array([array_spec.sample_bounded_spec(action_spec, rng)
                       for _ in range(num_envs)])
    env.reset()
    # Take one step and assert observation is batched the right way.
    time_step = env.step(action)
    self.assertEqual(num_envs, time_step.observation.shape[0])
    self.assertAllEqual(observation_spec.shape, time_step.observation.shape[1:])
    self.assertEqual(num_envs, action.shape[0])
    self.assertAllEqual(action_spec.shape, action.shape[1:])
    # Take another step and assert that observations have the same shape.
    time_step2 = env.step(action)
    self.assertAllEqual(time_step.observation.shape,
                        time_step2.observation.shape)
    env.close()

  def test_unstack_actions(self):
    num_envs = 2
    env = self._make_parallel_py_environment(num_envs=num_envs)
    action_spec = env.action_spec()
    rng = np.random.RandomState()
    batched_action = np.array([array_spec.sample_bounded_spec(action_spec, rng)
                               for _ in range(num_envs)])
    # Test that actions are correctly unstacked when just batched in np.array.
    unstacked_actions = env._unstack_actions(batched_action)
    for action in unstacked_actions:
      self.assertAllEqual(action_spec.shape,
                          action.shape)
    env.close()

  def test_unstack_nested_actions(self):
    num_envs = 2
    env = self._make_parallel_py_environment(num_envs=num_envs)
    action_spec = env.action_spec()
    rng = np.random.RandomState()
    batched_action = np.array([array_spec.sample_bounded_spec(action_spec, rng)
                               for _ in range(num_envs)])
    # Test that actions are correctly unstacked when nested in namedtuple.
    class NestedAction(collections.namedtuple(
        'NestedAction', ['action', 'other_var'])):
      pass
    nested_action = NestedAction(action=batched_action,
                                 other_var=np.array([13.0]*num_envs))
    unstacked_actions = env._unstack_actions(nested_action)
    for nested_action in unstacked_actions:
      # Each unstacked element keeps the namedtuple structure.
      self.assertAllEqual(action_spec.shape,
                          nested_action.action.shape)
      self.assertEqual(13.0, nested_action.other_var)
    env.close()
class ProcessPyEnvironmentTest(tf.test.TestCase):
  """Tests for ProcessPyEnvironment (one environment per subprocess)."""

  def test_close_no_hang_after_init(self):
    # Constructing + closing the worker process must not deadlock.
    constructor = functools.partial(
        random_py_environment.RandomPyEnvironment,
        array_spec.ArraySpec((3, 3), np.float32),
        array_spec.BoundedArraySpec([1], np.float32, minimum=-1.0, maximum=1.0),
        episode_end_probability=0, min_duration=2, max_duration=2)
    env = parallel_py_environment.ProcessPyEnvironment(constructor)
    env.start()
    env.close()

  def test_close_no_hang_after_step(self):
    # Closing after a couple of steps must not deadlock either.
    constructor = functools.partial(
        random_py_environment.RandomPyEnvironment,
        array_spec.ArraySpec((3, 3), np.float32),
        array_spec.BoundedArraySpec([1], np.float32, minimum=-1.0, maximum=1.0),
        episode_end_probability=0, min_duration=5, max_duration=5)
    rng = np.random.RandomState()
    env = parallel_py_environment.ProcessPyEnvironment(constructor)
    env.start()
    action_spec = env.action_spec()
    env.reset()
    env.step(array_spec.sample_bounded_spec(action_spec, rng))
    env.step(array_spec.sample_bounded_spec(action_spec, rng))
    env.close()

  def test_reraise_exception_in_init(self):
    # A crash in the worker's __init__ must surface in the parent process.
    constructor = MockEnvironmentCrashInInit
    env = parallel_py_environment.ProcessPyEnvironment(constructor)
    with self.assertRaises(Exception):
      env.start()

  def test_reraise_exception_in_reset(self):
    # A crash during reset must surface in the parent process.
    constructor = MockEnvironmentCrashInReset
    env = parallel_py_environment.ProcessPyEnvironment(constructor)
    env.start()
    with self.assertRaises(Exception):
      env.reset()

  def test_reraise_exception_in_step(self):
    # Steps before the configured crash succeed; the crashing step's
    # exception must surface in the parent process.
    constructor = functools.partial(
        MockEnvironmentCrashInStep, crash_at_step=3)
    env = parallel_py_environment.ProcessPyEnvironment(constructor)
    env.start()
    env.reset()
    action_spec = env.action_spec()
    rng = np.random.RandomState()
    env.step(array_spec.sample_bounded_spec(action_spec, rng))
    env.step(array_spec.sample_bounded_spec(action_spec, rng))
    with self.assertRaises(Exception):
      env.step(array_spec.sample_bounded_spec(action_spec, rng))
class MockEnvironmentCrashInInit(object):
  """Environment double whose constructor always fails."""

  def __init__(self, *args, **kwargs):
    del args, kwargs  # Unused.
    raise RuntimeError()

  def action_spec(self):
    """Return an empty action spec."""
    return []
class MockEnvironmentCrashInReset(object):
  """Environment double that constructs fine but fails on reset."""

  def __init__(self, *args, **kwargs):
    del args, kwargs  # Unused.

  def action_spec(self):
    """Return an empty action spec."""
    return []

  def _reset(self):
    raise RuntimeError()
class MockEnvironmentCrashInStep(random_py_environment.RandomPyEnvironment):
  """Raise an error after specified number of steps in an episode."""

  def __init__(self, crash_at_step):
    # Episode length is pinned to crash_at_step + 1 so the crash always
    # happens mid-episode, never at an episode boundary.
    super(MockEnvironmentCrashInStep, self).__init__(
        array_spec.ArraySpec((3, 3), np.float32),
        array_spec.BoundedArraySpec([1], np.float32, minimum=-1.0, maximum=1.0),
        episode_end_probability=0,
        min_duration=crash_at_step + 1,
        max_duration=crash_at_step + 1)
    self._crash_at_step = crash_at_step
    # Number of steps taken so far.
    self._steps = 0

  def _step(self, *args, **kwargs):
    # Delegate to the random environment, then crash on the Nth step.
    transition = super(MockEnvironmentCrashInStep, self)._step(*args, **kwargs)
    self._steps += 1
    if self._steps == self._crash_at_step:
      raise RuntimeError()
    return transition
if __name__ == '__main__':
  # Delegate discovery/execution of the test cases to the TF test runner.
  tf.test.main()
| [
"tensorflow.test.main",
"tf_agents.specs.array_spec.BoundedArraySpec",
"tf_agents.environments.parallel_py_environment.ParallelPyEnvironment",
"functools.partial",
"tf_agents.environments.parallel_py_environment.ProcessPyEnvironment",
"tf_agents.specs.array_spec.ArraySpec",
"tf_agents.environments.time_... | [((7913, 7927), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (7925, 7927), True, 'import tensorflow as tf\n'), ((1200, 1240), 'tf_agents.specs.array_spec.ArraySpec', 'array_spec.ArraySpec', (['(3, 3)', 'np.float32'], {}), '((3, 3), np.float32)\n', (1220, 1240), False, 'from tf_agents.specs import array_spec\n'), ((1267, 1307), 'tf_agents.environments.time_step.time_step_spec', 'ts.time_step_spec', (['self.observation_spec'], {}), '(self.observation_spec)\n', (1284, 1307), True, 'from tf_agents.environments import time_step as ts\n'), ((1331, 1408), 'tf_agents.specs.array_spec.BoundedArraySpec', 'array_spec.BoundedArraySpec', (['[7]'], {'dtype': 'np.float32', 'minimum': '(-1.0)', 'maximum': '(1.0)'}), '([7], dtype=np.float32, minimum=-1.0, maximum=1.0)\n', (1358, 1408), False, 'from tf_agents.specs import array_spec\n'), ((1589, 1697), 'tf_agents.environments.parallel_py_environment.ParallelPyEnvironment', 'parallel_py_environment.ParallelPyEnvironment', ([], {'env_constructors': '([constructor] * num_envs)', 'blocking': '(True)'}), '(env_constructors=[constructor\n ] * num_envs, blocking=True)\n', (1634, 1697), False, 'from tf_agents.environments import parallel_py_environment\n'), ((2289, 2312), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (2310, 2312), True, 'import numpy as np\n'), ((3201, 3224), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (3222, 3224), True, 'import numpy as np\n'), ((3811, 3834), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (3832, 3834), True, 'import numpy as np\n'), ((4072, 4135), 'collections.namedtuple', 'collections.namedtuple', (['"""NestedAction"""', "['action', 'other_var']"], {}), "('NestedAction', ['action', 'other_var'])\n", (4094, 4135), False, 'import collections\n'), ((4949, 5006), 'tf_agents.environments.parallel_py_environment.ProcessPyEnvironment', 'parallel_py_environment.ProcessPyEnvironment', 
(['constructor'], {}), '(constructor)\n', (4993, 5006), False, 'from tf_agents.environments import parallel_py_environment\n'), ((5379, 5402), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (5400, 5402), True, 'import numpy as np\n'), ((5413, 5470), 'tf_agents.environments.parallel_py_environment.ProcessPyEnvironment', 'parallel_py_environment.ProcessPyEnvironment', (['constructor'], {}), '(constructor)\n', (5457, 5470), False, 'from tf_agents.environments import parallel_py_environment\n'), ((5781, 5838), 'tf_agents.environments.parallel_py_environment.ProcessPyEnvironment', 'parallel_py_environment.ProcessPyEnvironment', (['constructor'], {}), '(constructor)\n', (5825, 5838), False, 'from tf_agents.environments import parallel_py_environment\n'), ((5998, 6055), 'tf_agents.environments.parallel_py_environment.ProcessPyEnvironment', 'parallel_py_environment.ProcessPyEnvironment', (['constructor'], {}), '(constructor)\n', (6042, 6055), False, 'from tf_agents.environments import parallel_py_environment\n'), ((6192, 6254), 'functools.partial', 'functools.partial', (['MockEnvironmentCrashInStep'], {'crash_at_step': '(3)'}), '(MockEnvironmentCrashInStep, crash_at_step=3)\n', (6209, 6254), False, 'import functools\n'), ((6274, 6331), 'tf_agents.environments.parallel_py_environment.ProcessPyEnvironment', 'parallel_py_environment.ProcessPyEnvironment', (['constructor'], {}), '(constructor)\n', (6318, 6331), False, 'from tf_agents.environments import parallel_py_environment\n'), ((6410, 6433), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (6431, 6433), True, 'import numpy as np\n'), ((1451, 1557), 'functools.partial', 'functools.partial', (['random_py_environment.RandomPyEnvironment', 'self.observation_spec', 'self.action_spec'], {}), '(random_py_environment.RandomPyEnvironment, self.\n observation_spec, self.action_spec)\n', (1468, 1557), False, 'import functools\n'), ((4749, 4789), 'tf_agents.specs.array_spec.ArraySpec', 
'array_spec.ArraySpec', (['(3, 3)', 'np.float32'], {}), '((3, 3), np.float32)\n', (4769, 4789), False, 'from tf_agents.specs import array_spec\n'), ((4799, 4870), 'tf_agents.specs.array_spec.BoundedArraySpec', 'array_spec.BoundedArraySpec', (['[1]', 'np.float32'], {'minimum': '(-1.0)', 'maximum': '(1.0)'}), '([1], np.float32, minimum=-1.0, maximum=1.0)\n', (4826, 4870), False, 'from tf_agents.specs import array_spec\n'), ((5179, 5219), 'tf_agents.specs.array_spec.ArraySpec', 'array_spec.ArraySpec', (['(3, 3)', 'np.float32'], {}), '((3, 3), np.float32)\n', (5199, 5219), False, 'from tf_agents.specs import array_spec\n'), ((5229, 5300), 'tf_agents.specs.array_spec.BoundedArraySpec', 'array_spec.BoundedArraySpec', (['[1]', 'np.float32'], {'minimum': '(-1.0)', 'maximum': '(1.0)'}), '([1], np.float32, minimum=-1.0, maximum=1.0)\n', (5256, 5300), False, 'from tf_agents.specs import array_spec\n'), ((5552, 5600), 'tf_agents.specs.array_spec.sample_bounded_spec', 'array_spec.sample_bounded_spec', (['action_spec', 'rng'], {}), '(action_spec, rng)\n', (5582, 5600), False, 'from tf_agents.specs import array_spec\n'), ((5615, 5663), 'tf_agents.specs.array_spec.sample_bounded_spec', 'array_spec.sample_bounded_spec', (['action_spec', 'rng'], {}), '(action_spec, rng)\n', (5645, 5663), False, 'from tf_agents.specs import array_spec\n'), ((6447, 6495), 'tf_agents.specs.array_spec.sample_bounded_spec', 'array_spec.sample_bounded_spec', (['action_spec', 'rng'], {}), '(action_spec, rng)\n', (6477, 6495), False, 'from tf_agents.specs import array_spec\n'), ((6510, 6558), 'tf_agents.specs.array_spec.sample_bounded_spec', 'array_spec.sample_bounded_spec', (['action_spec', 'rng'], {}), '(action_spec, rng)\n', (6540, 6558), False, 'from tf_agents.specs import array_spec\n'), ((7354, 7394), 'tf_agents.specs.array_spec.ArraySpec', 'array_spec.ArraySpec', (['(3, 3)', 'np.float32'], {}), '((3, 3), np.float32)\n', (7374, 7394), False, 'from tf_agents.specs import array_spec\n'), ((7404, 7475), 
'tf_agents.specs.array_spec.BoundedArraySpec', 'array_spec.BoundedArraySpec', (['[1]', 'np.float32'], {'minimum': '(-1.0)', 'maximum': '(1.0)'}), '([1], np.float32, minimum=-1.0, maximum=1.0)\n', (7431, 7475), False, 'from tf_agents.specs import array_spec\n'), ((2336, 2384), 'tf_agents.specs.array_spec.sample_bounded_spec', 'array_spec.sample_bounded_spec', (['action_spec', 'rng'], {}), '(action_spec, rng)\n', (2366, 2384), False, 'from tf_agents.specs import array_spec\n'), ((3256, 3304), 'tf_agents.specs.array_spec.sample_bounded_spec', 'array_spec.sample_bounded_spec', (['action_spec', 'rng'], {}), '(action_spec, rng)\n', (3286, 3304), False, 'from tf_agents.specs import array_spec\n'), ((3866, 3914), 'tf_agents.specs.array_spec.sample_bounded_spec', 'array_spec.sample_bounded_spec', (['action_spec', 'rng'], {}), '(action_spec, rng)\n', (3896, 3914), False, 'from tf_agents.specs import array_spec\n'), ((4257, 4284), 'numpy.array', 'np.array', (['([13.0] * num_envs)'], {}), '([13.0] * num_envs)\n', (4265, 4284), True, 'import numpy as np\n'), ((6614, 6662), 'tf_agents.specs.array_spec.sample_bounded_spec', 'array_spec.sample_bounded_spec', (['action_spec', 'rng'], {}), '(action_spec, rng)\n', (6644, 6662), False, 'from tf_agents.specs import array_spec\n')] |
"""
Utility classes and functions used in this library
"""
import numpy as np
import heapq
import numba
import numba.experimental
class InvalidPrefsError(Exception):
    """Raised when input preferences are invalid."""
class InvalidCapsError(Exception):
    """Raised when input caps are invalid."""
class InvalidRegionError(Exception):
    """Raised when input regions are invalid."""
# Field typing shared by the MaxHeap/MinHeap jitclasses below:
# 64-bit ints for the bookkeeping scalars and a 1-d int64 array
# as the backing storage.
heap_spec = [
    ("length", numba.i8),
    ("arr_size", numba.i8),
    ("array", numba.i8[:]),
]
@numba.experimental.jitclass(heap_spec)
class MaxHeap(object):
    """Fixed-capacity binary max-heap of int64 values (numba jitclass)."""
    def __init__(self, arr_size):
        # `length` counts occupied slots; the backing array is
        # preallocated to the fixed capacity `arr_size`.
        self.length = 0
        self.arr_size = arr_size
        self.array = np.empty(arr_size, dtype=numba.i8)
    @staticmethod
    def _comp(elem1, elem2):
        # Heap-order predicate: True when `elem1` may sit above `elem2`
        # (>= for a max-heap; this is the only line differing from MinHeap).
        return elem1 >= elem2
    def _swap(self, ind1, ind2):
        # Exchange two slots of the backing array.
        temp = self.array[ind1]
        self.array[ind1] = self.array[ind2]
        self.array[ind2] = temp
    def _shiftup(self, index):
        # Bubble the element at `index` towards the root until the heap
        # property holds (used after push).
        while index > 0:
            parent = (index-1) // 2
            if self._comp(self.array[parent], self.array[index]):
                break
            self._swap(parent, index)
            index = parent
    def _shiftdown(self):
        # Sink the root element until the heap property holds
        # (used after pop/replace).
        index = 0
        left_child, right_child = 1, 2
        while left_child < self.length:
            # Pick the child that may sit above the other per _comp.
            if right_child < self.length:
                if self._comp(self.array[left_child], self.array[right_child]):
                    larger_child = left_child
                else:
                    larger_child = right_child
            else:
                larger_child = left_child
            if self._comp(self.array[index], self.array[larger_child]):
                break
            self._swap(index, larger_child)
            index = larger_child
            left_child = 2 * index + 1
            right_child = 2 * index + 2
    def push(self, value):
        """Insert `value`; raises IndexError when the heap is full."""
        if self.length == self.arr_size:
            raise IndexError(
                "The heap is full (its length already reaches `arr_size`).")
        self.array[self.length] = value
        self.length += 1
        self._shiftup(self.length-1)
    def pop(self):
        """Remove and return the root (maximum); raises IndexError if empty."""
        if self.length == 0:
            raise IndexError("The heap is empty.")
        self.length -= 1
        elem = self.array[0]
        self.array[0] = self.array[self.length]
        self._shiftdown()
        return elem
    def replace(self, value):
        """Replace the root with `value` and return the old root."""
        # NOTE(review): no empty-check here — replace() on an empty heap
        # reads uninitialized memory; confirm callers never do that.
        elem = self.array[0]
        self.array[0] = value
        self._shiftdown()
        return elem
    def values(self):
        """Return the occupied slice of the backing array (a view)."""
        return self.array[:self.length]
    def root(self):
        """Return the root element without removing it."""
        if self.length == 0:
            raise IndexError("The heap is empty.")
        return self.array[0]
    def is_full(self):
        """Return True when no further push is possible."""
        return self.length == self.arr_size
@numba.experimental.jitclass(heap_spec)
class MinHeap(object):
    """
    Fixed-capacity binary min-heap of int64 values (numba jitclass).

    Duplicates MaxHeap apart from `_comp`, because inheritance from a
    numba.jitclass is currently not supported.
    """
    def __init__(self, arr_size):
        # `length` counts occupied slots; the backing array is
        # preallocated to the fixed capacity `arr_size`.
        self.length = 0
        self.arr_size = arr_size
        self.array = np.empty(arr_size, dtype=numba.i8)
    @staticmethod
    def _comp(elem1, elem2):
        # Heap-order predicate: True when `elem1` may sit above `elem2`
        # (<= for a min-heap; the only line differing from MaxHeap).
        return elem1 <= elem2
    def _swap(self, ind1, ind2):
        # Exchange two slots of the backing array.
        temp = self.array[ind1]
        self.array[ind1] = self.array[ind2]
        self.array[ind2] = temp
    def _shiftup(self, index):
        # Bubble the element at `index` towards the root until the heap
        # property holds (used after push).
        while index > 0:
            parent = (index-1) // 2
            if self._comp(self.array[parent], self.array[index]):
                break
            self._swap(parent, index)
            index = parent
    def _shiftdown(self):
        # Sink the root element until the heap property holds
        # (used after pop/replace).
        index = 0
        left_child, right_child = 1, 2
        while left_child < self.length:
            # `larger_child` actually holds the child preferred by _comp,
            # i.e. the *smaller* child for this min-heap (name inherited
            # from the MaxHeap copy).
            if right_child < self.length:
                if self._comp(self.array[left_child], self.array[right_child]):
                    larger_child = left_child
                else:
                    larger_child = right_child
            else:
                larger_child = left_child
            if self._comp(self.array[index], self.array[larger_child]):
                break
            self._swap(index, larger_child)
            index = larger_child
            left_child = 2 * index + 1
            right_child = 2 * index + 2
    def push(self, value):
        """Insert `value`; raises IndexError when the heap is full."""
        if self.length == self.arr_size:
            raise IndexError(
                "The heap is full (its length already reaches `arr_size`).")
        self.array[self.length] = value
        self.length += 1
        self._shiftup(self.length-1)
    def pop(self):
        """Remove and return the root (minimum); raises IndexError if empty."""
        if self.length == 0:
            raise IndexError("The heap is empty.")
        self.length -= 1
        elem = self.array[0]
        self.array[0] = self.array[self.length]
        self._shiftdown()
        return elem
    def replace(self, value):
        """Replace the root with `value` and return the old root."""
        # NOTE(review): no empty-check here — replace() on an empty heap
        # reads uninitialized memory; confirm callers never do that.
        elem = self.array[0]
        self.array[0] = value
        self._shiftdown()
        return elem
    def values(self):
        """Return the occupied slice of the backing array (a view)."""
        return self.array[:self.length]
    def root(self):
        """Return the root element without removing it."""
        if self.length == 0:
            raise IndexError("The heap is empty.")
        return self.array[0]
    def is_full(self):
        """Return True when no further push is possible."""
        return self.length == self.arr_size
def shuffle_each_row_prev(
arr,
random_generator,
outside_option=None,
allow_op_first=False
):
x, y = arr.shape
rows = np.indices((x, y))[0]
cols = [random_generator.permutation(y) for _ in range(x)]
if (outside_option is not None) and (not allow_op_first):
while True:
invalid_rows = np.where(arr[rows, cols][:, 0] == outside_option)[0]
if len(invalid_rows) == 0:
break
new_cols = [random_generator.permutation(y) for _ in range(x)]
for r in invalid_rows:
cols[r] = new_cols[r]
return arr[rows, cols]
def to_probability(li):
    """Normalize `li` so its entries sum to one (a probability vector)."""
    total = np.sum(li)
    return li / total
def shuffle_list(
    li,
    size=1,
    probs=None,
    outside_option=None,
    random_generator=None
):
    """
    Draw `size` weighted shuffles of `li`.

    Args:
        li : 1d array-like(int)
            The list to be shuffled.
        size : int, optional
            The sample size of shuffle trials.
        probs : 1d array-like(float), optional
            The probability each element of `li` is drawn. The size of `probs`
            should be same as that of `li`. Each element must be strictly
            greater than 0. If None, then probs will be uniform over the list.
        outside_option : int or None, optional
            An integer that is in `li`. If not None, then the value will never
            be at the beginning of the shuffled list.
        random_generator : numpy.random.Generator, optional
            The random generator. If None, then a generator is initialized in
            this function.
    Return:
        shuffled_lists : 2d-array(int)
            The list of shuffled lists. shape=(size, len(li)).
    Raises:
        ValueError: if `probs` has a different length than `li`, contains a
            non-positive entry, or `outside_option` is not an element of `li`.
    """
    li = np.array(li)
    list_size = len(li)
    indexes = np.arange(list_size)
    shuffled_lists = np.empty(shape=(size, list_size), dtype=int)
    if random_generator is None:
        random_generator = np.random.default_rng()
    if probs is None:
        probs = np.ones(list_size) / list_size
    else:
        probs = np.array(probs)
        if len(probs) != list_size:
            raise ValueError("The size of `li` and `probs` must be the same.")
        if np.any(probs <= 0):
            raise ValueError("Elements of `probs` must be strictly greater than 0.")
    if outside_option is None:
        # No outside option: one weighted draw without replacement per trial.
        probs = to_probability(probs)
        for i in range(size):
            shuffled_lists[i, :] = random_generator.choice(
                indexes,
                size=list_size,
                replace=False,
                p=probs
            )
    else:
        # With an outside option:
        # 1. draw the first element from everything except the outside option,
        # 2. shuffle the remaining elements (outside option included) after it.
        op_indexes = indexes[li == outside_option]
        if len(op_indexes) == 0:
            raise ValueError(f"`outside_option`: {outside_option} is not in `li`.")
        op_index = op_indexes[0]
        probs_without_op = np.copy(probs)
        probs_without_op[op_index] = 0
        probs_without_op = to_probability(probs_without_op)
        for i in range(size):
            shuffled_lists[i, 0] = random_generator.choice(
                indexes,
                size=1,
                replace=False,
                p=probs_without_op
            )
        for i in range(size):
            # Zero out the already-chosen first element before renormalizing.
            probs_remaining = np.copy(probs)
            probs_remaining[shuffled_lists[i, 0]] = 0
            probs_remaining = to_probability(probs_remaining)
            shuffled_lists[i, 1:] = random_generator.choice(
                indexes,
                size=list_size-1,
                replace=False,
                p=probs_remaining
            )
    return li[shuffled_lists]
def generate_random_prefs(
    num_agents,
    num_objects,
    outside_option=False,
    random_generator=None
):
    """
    Randomly generate preference lists of agents.

    When `outside_option` is True, the integer `num_objects` is appended to
    the preference domain and treated as the outside option (it never appears
    first in a list).
    """
    if random_generator is None:
        random_generator = np.random.default_rng()
    if outside_option:
        pref_length, op = num_objects + 1, num_objects
    else:
        pref_length, op = num_objects, None
    return shuffle_list(
        np.arange(pref_length),
        size=num_agents,
        probs=None,
        outside_option=op,
        random_generator=random_generator
    )
def generate_prefs_from_scores(
    num_agents,
    num_objects,
    scores,
    outside_score=None,
    random_generator=None
):
    """Generate preference lists where object j is drawn with probability
    proportional to exp(scores[j]) (logit weights).

    Args:
        num_agents : int
            Number of preference lists to draw.
        num_objects : int
            Number of objects; `scores` is expected to have this length.
        scores : 1d array-like(float)
            Per-object scores. Not modified by this function.
        outside_score : float or None, optional
            Score of the outside option. If not None, the outside option
            (index `num_objects`) is appended and never drawn first.
        random_generator : numpy.random.Generator, optional
            If None, a fresh generator is created.
    Return:
        prefs : 2d-array(int), shape=(num_agents, len(scores) [+1])
    """
    if random_generator is None:
        random_generator = np.random.default_rng()
    # Work on a copy: the previous version appended the outside score to the
    # caller's own list, mutating the argument in place.
    if isinstance(scores, np.ndarray):
        scores = scores.tolist()
    else:
        scores = list(scores)
    if outside_score is not None:
        scores.append(outside_score)
    # normalize score (logit)
    probs = to_probability(np.exp(scores))
    if outside_score is None:
        prefs = shuffle_list(
            np.arange(num_objects),
            size=num_agents,
            probs=probs,
            outside_option=None,
            random_generator=random_generator
        )
    else:
        prefs = shuffle_list(
            np.arange(num_objects+1),
            size=num_agents,
            probs=probs,
            outside_option=num_objects,
            random_generator=random_generator
        )
    return prefs
def generate_prefs_from_random_scores(
    num_agents,
    num_objects,
    outside_score=None,
    random_type="normal",
    random_generator=None
):
    """
    Draw per-object scores from a random distribution and build preferences.

    Args:
        num_agents : int(>0)
            The length of preference lists.
        num_objects : int(>0)
            The size of objects over which each agent's preference is defined.
        outside_score : float(0<=x<=1) or None
            Relative "strength" of the outside option. If None is set,
            then outside option will not be included in the preferences.
        random_type : str, optional. In ['normal', 'cauchy', 'lognormal']
            The probability distribution of the score.
        random_generator : numpy.random.Generator, optional
            The random generator. If None, then a generator is initialized in
            this function.
    Return:
        prefs : 2d-array(int)
            The list of agents' preferences over the objects and the outside
            option. The elements must be 0 <= x <= num_objects. The number
            `num_objects` is considered as an outside option.
    """
    if random_generator is None:
        random_generator = np.random.default_rng()
    if outside_score is not None and (outside_score < 0 or 1 < outside_score):
        raise ValueError(f"`outside_score` must be 0 <= x <= 1")
    adjusted_op_score = None
    if random_type == "normal":
        scale = 1.0
        scores = random_generator.normal(size=num_objects, scale=scale)
        if outside_score is not None:
            # Map [0, 1] onto [-3*sigma, 3*sigma].
            adjusted_op_score = (outside_score - 0.5) * (3 * scale / 0.5)
    elif random_type == "cauchy":
        scores = random_generator.standard_cauchy(size=num_objects)
        if outside_score is not None:
            # Map [0, 1] onto [-3, 3].
            adjusted_op_score = (outside_score - 0.5) * (3 / 0.5)
    elif random_type == "lognormal":
        sigma = 1.0
        scores = random_generator.lognormal(size=num_objects, sigma=sigma)
        if outside_score is not None:
            # Map [0, 1] onto mean +/- 3 standard deviations of the lognormal.
            variance_exponent = np.power(sigma, 2)
            mean = np.exp(variance_exponent / 2)
            std = np.sqrt(np.exp(variance_exponent) * (np.exp(variance_exponent) - 1))
            adjusted_op_score = mean + (outside_score - 0.5) * (3 * std / 0.5)
    else:
        raise ValueError("`random_type` must be in ['normal', 'cauchy', 'lognormal'].")
    return generate_prefs_from_scores(
        num_agents,
        num_objects,
        scores,
        outside_score=adjusted_op_score,
        random_generator=random_generator
    )
def round_caps_to_meet_sum(li, target_sum, random_generator=None):
    """Scale `li` down so that its sum equals `target_sum`, rounding to ints.

    Entries are scaled proportionally and floored; the remaining deficit is
    distributed by adding 1 to the entries with the largest fractional
    remainders. Ties are broken by index order, or randomly when a
    `random_generator` is supplied.

    Args:
        li : 1d array-like(number)
            Capacities to shrink.
        target_sum : number
            Desired total. If `sum(li) <= target_sum`, `li` is returned
            unchanged (as an array, original dtype).
        random_generator : numpy.random.Generator, optional
            Used only to randomize tie-breaking.
    Return:
        1d-array(int) summing to `target_sum` (or the unchanged input array).
    """
    li = np.array(li)
    total = np.sum(li)
    if total <= target_sum:
        # Already within budget: nothing to trim.
        return li
    base_li = li * target_sum / total
    rounded_li = np.floor(base_li)
    rounded_total = np.sum(rounded_li)
    # Rank entries by descending fractional remainder via a structured sort
    # key. (The previous version allocated a [2, n] array and used only row 0.)
    temp = np.empty(len(li),
                    dtype=[("value", float), ("breaking_tie_order", int)])
    temp["value"] = -1 * (base_li - rounded_li)
    if random_generator is None:
        temp["breaking_tie_order"] = np.arange(len(li))
    else:
        order = np.arange(len(li))
        random_generator.shuffle(order)
        temp["breaking_tie_order"] = order
    surplus_ordered_indices = np.argsort(
        temp, order=["value", "breaking_tie_order"])
    deficit = int(target_sum - rounded_total)
    rounded_li[surplus_ordered_indices[0:deficit]] += 1
    return rounded_li.astype(int)
def generate_caps_given_sum(len_list, target_sum, random_generator=None):
    """Draw `len_list` random capacities whose total is trimmed to `target_sum`.

    When the budget exceeds the list length, one unit is reserved per entry
    so every resulting capacity is at least 1.
    """
    if random_generator is None:
        random_generator = np.random.default_rng()
    # If target_sum > len_list, then set min(caps) == 1.
    original_target_sum = target_sum
    if target_sum > len_list:
        target_sum -= len_list
    gamma_param = np.sqrt(target_sum)
    raw_caps = random_generator.gamma(
        shape=gamma_param,
        scale=gamma_param,
        size=len_list
    )
    caps = round_caps_to_meet_sum(np.round(raw_caps).astype(int), target_sum)
    if original_target_sum > len_list:
        caps = caps + 1
    return caps
# Module is import-only; nothing runs when executed as a script.
if __name__ == "__main__":
    pass
| [
"numpy.sum",
"numpy.copy",
"numpy.empty",
"numpy.floor",
"numpy.power",
"numba.experimental.jitclass",
"numpy.ones",
"numpy.random.default_rng",
"numpy.indices",
"numpy.argsort",
"numpy.where",
"numpy.array",
"numpy.arange",
"numpy.exp",
"numpy.round",
"numpy.sqrt"
] | [((586, 624), 'numba.experimental.jitclass', 'numba.experimental.jitclass', (['heap_spec'], {}), '(heap_spec)\n', (613, 624), False, 'import numba\n'), ((2883, 2921), 'numba.experimental.jitclass', 'numba.experimental.jitclass', (['heap_spec'], {}), '(heap_spec)\n', (2910, 2921), False, 'import numba\n'), ((7018, 7030), 'numpy.array', 'np.array', (['li'], {}), '(li)\n', (7026, 7030), True, 'import numpy as np\n'), ((7069, 7089), 'numpy.arange', 'np.arange', (['list_size'], {}), '(list_size)\n', (7078, 7089), True, 'import numpy as np\n'), ((7111, 7155), 'numpy.empty', 'np.empty', ([], {'shape': '(size, list_size)', 'dtype': 'int'}), '(shape=(size, list_size), dtype=int)\n', (7119, 7155), True, 'import numpy as np\n'), ((13646, 13658), 'numpy.array', 'np.array', (['li'], {}), '(li)\n', (13654, 13658), True, 'import numpy as np\n'), ((13671, 13681), 'numpy.sum', 'np.sum', (['li'], {}), '(li)\n', (13677, 13681), True, 'import numpy as np\n'), ((13785, 13802), 'numpy.floor', 'np.floor', (['base_li'], {}), '(base_li)\n', (13793, 13802), True, 'import numpy as np\n'), ((13823, 13841), 'numpy.sum', 'np.sum', (['rounded_li'], {}), '(rounded_li)\n', (13829, 13841), True, 'import numpy as np\n'), ((760, 794), 'numpy.empty', 'np.empty', (['arr_size'], {'dtype': 'numba.i8'}), '(arr_size, dtype=numba.i8)\n', (768, 794), True, 'import numpy as np\n'), ((3139, 3173), 'numpy.empty', 'np.empty', (['arr_size'], {'dtype': 'numba.i8'}), '(arr_size, dtype=numba.i8)\n', (3147, 3173), True, 'import numpy as np\n'), ((5411, 5429), 'numpy.indices', 'np.indices', (['(x, y)'], {}), '((x, y))\n', (5421, 5429), True, 'import numpy as np\n'), ((5956, 5966), 'numpy.sum', 'np.sum', (['li'], {}), '(li)\n', (5962, 5966), True, 'import numpy as np\n'), ((7217, 7240), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (7238, 7240), True, 'import numpy as np\n'), ((7342, 7357), 'numpy.array', 'np.array', (['probs'], {}), '(probs)\n', (7350, 7357), True, 'import numpy as np\n'), 
((8420, 8434), 'numpy.copy', 'np.copy', (['probs'], {}), '(probs)\n', (8427, 8434), True, 'import numpy as np\n'), ((9443, 9466), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (9464, 9466), True, 'import numpy as np\n'), ((9645, 9664), 'numpy.arange', 'np.arange', (['len_pref'], {}), '(len_pref)\n', (9654, 9664), True, 'import numpy as np\n'), ((10005, 10028), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (10026, 10028), True, 'import numpy as np\n'), ((10236, 10250), 'numpy.exp', 'np.exp', (['scores'], {}), '(scores)\n', (10242, 10250), True, 'import numpy as np\n'), ((11948, 11971), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (11969, 11971), True, 'import numpy as np\n'), ((14270, 14325), 'numpy.argsort', 'np.argsort', (['temp'], {'order': "['value', 'breaking_tie_order']"}), "(temp, order=['value', 'breaking_tie_order'])\n", (14280, 14325), True, 'import numpy as np\n'), ((14586, 14609), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (14607, 14609), True, 'import numpy as np\n'), ((7280, 7298), 'numpy.ones', 'np.ones', (['list_size'], {}), '(list_size)\n', (7287, 7298), True, 'import numpy as np\n'), ((7487, 7505), 'numpy.sum', 'np.sum', (['(probs <= 0)'], {}), '(probs <= 0)\n', (7493, 7505), True, 'import numpy as np\n'), ((8818, 8832), 'numpy.copy', 'np.copy', (['probs'], {}), '(probs)\n', (8825, 8832), True, 'import numpy as np\n'), ((10325, 10347), 'numpy.arange', 'np.arange', (['num_objects'], {}), '(num_objects)\n', (10334, 10347), True, 'import numpy as np\n'), ((10547, 10573), 'numpy.arange', 'np.arange', (['(num_objects + 1)'], {}), '(num_objects + 1)\n', (10556, 10573), True, 'import numpy as np\n'), ((14816, 14835), 'numpy.sqrt', 'np.sqrt', (['target_sum'], {}), '(target_sum)\n', (14823, 14835), True, 'import numpy as np\n'), ((14852, 14871), 'numpy.sqrt', 'np.sqrt', (['target_sum'], {}), '(target_sum)\n', (14859, 14871), True, 'import numpy as np\n'), 
((14913, 14927), 'numpy.round', 'np.round', (['caps'], {}), '(caps)\n', (14921, 14927), True, 'import numpy as np\n'), ((5611, 5660), 'numpy.where', 'np.where', (['(arr[rows, cols][:, 0] == outside_option)'], {}), '(arr[rows, cols][:, 0] == outside_option)\n', (5619, 5660), True, 'import numpy as np\n'), ((13065, 13083), 'numpy.power', 'np.power', (['sigma', '(2)'], {}), '(sigma, 2)\n', (13073, 13083), True, 'import numpy as np\n'), ((13122, 13140), 'numpy.power', 'np.power', (['sigma', '(2)'], {}), '(sigma, 2)\n', (13130, 13140), True, 'import numpy as np\n'), ((13152, 13170), 'numpy.power', 'np.power', (['sigma', '(2)'], {}), '(sigma, 2)\n', (13160, 13170), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
from data import shuffle
import math
from tqdm import tqdm
from sklearn.metrics import roc_auc_score
class Model(object):
    """Base class for binary image classifiers built on the TF1 graph API.

    Builds a fresh default graph with placeholders for input images of shape
    (batch, 88, 200, 3) and binary labels of shape (batch, 1). Subclasses
    must implement `initModel` (populate `self.weights` / `self.biases`) and
    `net` (the forward pass producing logits), and set `self.save_folder`.
    """
    def __init__(self):
        # Start from a clean graph so repeated instantiation does not
        # accumulate duplicate ops/variables.
        tf.reset_default_graph()
        self.X = tf.placeholder(tf.float32, [None, 88, 200, 3])
        self.Y = tf.placeholder(tf.float32, [None, 1])
        # Defaults to 1.0 (no dropout) when not fed.
        self.keep_prob = tf.placeholder_with_default(1.0, shape=())
        # Fed True during training so batch norm uses batch statistics.
        self.training = tf.placeholder(tf.bool, name='is_training')
        self.initModel()
        self.buildModel()
        self.saver = tf.train.Saver()
    def initModel(self):
        """Create `self.weights` and `self.biases`; implemented by subclasses."""
        raise NotImplementedError
    def buildModel(self):
        """Wire the forward pass plus prediction/accuracy/loss/regularizer tensors."""
        self.logits = self.net(self.X)
        self.prediction = tf.nn.sigmoid(self.logits)
        # Predictions round to 0/1 and are compared against the labels.
        self.correct_pred = tf.equal(tf.round(self.prediction), self.Y)
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))
        self.loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=self.logits, labels=self.Y))
        # L2 penalty over every entry of self.weights (biases excluded).
        self.regularizer = tf.add_n([tf.nn.l2_loss(w) for w in list(self.weights.values())])
    def net(self):
        """Forward pass returning logits; implemented by subclasses.

        NOTE(review): subclasses define this as `net(self, x)` — the abstract
        signature here is missing the input argument.
        """
        raise NotImplementedError
    def conv2d(self, x, W, b, strides, batch_norm=True):
        """2-D convolution (SAME padding) + bias, optional batch norm, then ReLU."""
        x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
        x = tf.nn.bias_add(x, b)
        if batch_norm:
            x = tf.layers.batch_normalization(x, training=self.training)
        x = tf.nn.relu(x)
        return x
    def maxpool2d(self, x, k):
        """k x k max pooling with stride k (SAME padding)."""
        return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')
    def fc(self, x, W, b, batch_norm=True, activation=True):
        """Fully connected layer: x @ W + b, optional batch norm and ReLU."""
        x = tf.matmul(x, W) + b
        if batch_norm:
            x = tf.layers.batch_normalization(x, training=self.training)
        if activation:
            x = tf.nn.relu(x)
        return x
    def resblock(self, x, W1, b1, W2, b2, strides, batch_norm=True):
        """Two-convolution residual block; returns the un-activated sum `f + x`.

        NOTE(review): the final `tf.nn.relu(f)` result is discarded — the
        returned tensor is `f + x` without activation. Confirm whether
        `return tf.nn.relu(f + x)` was intended.
        """
        f = tf.nn.conv2d(x, W1, strides=[1, strides, strides, 1], padding='SAME')
        f = tf.nn.bias_add(f, b1)
        if batch_norm:
            f = tf.layers.batch_normalization(f, training=self.training)
        f = tf.nn.relu(f)
        f = tf.nn.conv2d(f, W2, strides=[1, strides, strides, 1], padding='SAME')
        f = tf.nn.bias_add(f, b2)
        if batch_norm:
            f = tf.layers.batch_normalization(f, training=self.training)
        x = f + x  # skip connection
        f = tf.nn.relu(f)  # NOTE(review): dead store — `f` is not used after this line
        return x
    def train(self, trainInput, testInput, trainTarget, testTarget, \
        reg_lambda=0.0, learning_rate=1e-4, dropout=0.0, batch_size=32, epochs=50, \
        restore_model=False, save_model=True, save_freq=5):
        """Train with Adam, evaluating loss/accuracy/AUC on both splits each epoch.

        Returns (loss_history, accuracy_history), each a dict with "train"
        and "test" lists (one entry per epoch). Checkpoints are written to
        `self.save_folder` every `save_freq` epochs when `save_model` is True.
        """
        self.loss_op = self.loss + reg_lambda*self.regularizer
        #self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
        self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        # Batch-norm moving-average updates live in UPDATE_OPS; they are
        # grouped with the optimizer step so they run every iteration.
        self.update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        self.train_op = self.optimizer.minimize(self.loss_op)
        self.train_op = tf.group([self.train_op, self.update_ops])
        print("Training...")
        total_num_input = trainInput.shape[0]
        steps = math.ceil(total_num_input / batch_size)
        train_loss_history, test_loss_history, \
        train_accuracy_history, test_accuracy_history = [], [], [], []
        with tf.Session() as sess:
            tf.gfile.MakeDirs(self.save_folder)
            if restore_model:
                lastckpt = tf.train.latest_checkpoint(self.save_folder)
                print("Restoring from {}".format(lastckpt))
                self.saver = tf.train.import_meta_graph(lastckpt+'.meta')
                self.saver.restore(sess, lastckpt)
            else:
                sess.run(tf.global_variables_initializer())
            for e in range(1, epochs+1):
                # Re-shuffle the training data every epoch.
                X, Y = shuffle(trainInput, trainTarget)
                print("Training phase:")
                for i in tqdm(range(0, steps)):
                    X_batch = X[i * batch_size:(i + 1) * batch_size, :]
                    Y_batch = Y[i * batch_size:(i + 1) * batch_size, :]
                    n_batch = X_batch.shape[0]
                    _, = sess.run([self.train_op], feed_dict={
                        self.X: X_batch,
                        self.Y: Y_batch,
                        self.keep_prob: 1-dropout,
                        self.training: True})
                # Need to run in non-training mode for batch norm and dropout
                print("Test phase:")
                train_loss, train_accuracy, train_prediction = self._test(sess, trainInput, trainTarget, batch_size=batch_size)
                test_loss, test_accuracy, test_prediction = self._test(sess, testInput, testTarget, batch_size=batch_size)
                train_auc = roc_auc_score(trainTarget, train_prediction)
                test_auc = roc_auc_score(testTarget, test_prediction)
                train_loss_history.append(train_loss)
                train_accuracy_history.append(train_accuracy)
                test_loss_history.append(test_loss)
                test_accuracy_history.append(test_accuracy)
                print('Epoch %3d ==> Train Loss: %.4f, Train AUC: %.4f, Test Loss: %.4f, Test AUC: %.4f' % \
                    (e, train_loss_history[-1], train_auc, test_loss_history[-1], test_auc))
                if save_model and (e % save_freq == 0):
                    self.saver.save(sess, self.save_folder+'model.ckpt', global_step=e)
            if save_model:
                self.saver.save(sess, self.save_folder+'model.ckpt', global_step=epochs)
        loss_history = {
            "train": train_loss_history,
            "test": test_loss_history
        }
        accuracy_history = {
            "train": train_accuracy_history,
            "test": test_accuracy_history
        }
        return loss_history, accuracy_history
    def test(self, Input, Target, batch_size=32):
        """Restore the latest checkpoint and evaluate on (Input, Target).

        Returns (loss, accuracy, predictions).
        """
        with tf.Session() as sess:
            lastckpt = tf.train.latest_checkpoint(self.save_folder)
            print("Restoring from {}".format(lastckpt))
            self.saver = tf.train.import_meta_graph(lastckpt+'.meta')
            self.saver.restore(sess, lastckpt)
            test_loss, test_accuracy, test_prediction = self._test(sess, Input, Target, batch_size=batch_size)
        return test_loss, test_accuracy, test_prediction
    def _test(self, sess, Input, Target, batch_size=32):
        """Batched evaluation in inference mode (no dropout, batch norm frozen).

        Returns (mean loss, mean accuracy, concatenated predictions).
        """
        total_num_input = Input.shape[0]
        steps = math.ceil(total_num_input / batch_size)
        test_loss = 0
        test_accuracy = 0
        predictions = []
        for i in tqdm(range(0, steps)):
            X = Input[i * batch_size:(i + 1) * batch_size, :]
            Y = Target[i * batch_size:(i + 1) * batch_size, :]
            n = X.shape[0]
            _loss, _accuracy, _prediction = sess.run(
                [self.loss, self.accuracy, self.prediction], feed_dict={
                    self.X: X,
                    self.Y: Y,
                    self.keep_prob: 1.0,
                    self.training: False})
            # Weight by batch size so the final (possibly short) batch is
            # averaged correctly.
            test_loss += _loss*n
            test_accuracy += _accuracy*n
            predictions.append(_prediction)
        test_loss = test_loss/total_num_input
        test_accuracy = test_accuracy/total_num_input
        test_prediction = np.concatenate(predictions, axis=0)
        return test_loss, test_accuracy, test_prediction
#########
# Multilayered Perceptron #
#########
class MLP_Model(Model):
    """Fully-connected baseline: flatten (88*200*3) -> 4096 -> 1024 -> 256 -> 1."""
    def __init__(self):
        super().__init__()
        self.save_folder = "./models/mlp/"
    def initModel(self):
        """Create Xavier-initialized weights and biases for three hidden layers."""
        self.weights = {
            'w_hidden1' : tf.get_variable(name="WH1", shape=[88*200*3, 4096], initializer=tf.contrib.layers.xavier_initializer()),
            'w_hidden2' : tf.get_variable(name="WH2", shape=[4096, 1024], initializer=tf.contrib.layers.xavier_initializer()),
            'w_hidden3' : tf.get_variable(name="WH3", shape=[1024, 256], initializer=tf.contrib.layers.xavier_initializer()),
            'out' : tf.get_variable(name="WOUT", shape=[256, 1], initializer=tf.contrib.layers.xavier_initializer())
        }
        self.biases = {
            'b_hidden1': tf.get_variable(name="BH1", shape=[4096], initializer=tf.contrib.layers.xavier_initializer()),
            'b_hidden2': tf.get_variable(name="BH2", shape=[1024], initializer=tf.contrib.layers.xavier_initializer()),
            'b_hidden3': tf.get_variable(name="BH3", shape=[256], initializer=tf.contrib.layers.xavier_initializer()),
            'out': tf.get_variable(name="BOUT", shape=[1], initializer=tf.contrib.layers.xavier_initializer())
        }
    def net(self, x):
        """Forward pass: flatten, three ReLU FC layers (dropout on the last), linear output."""
        weights = self.weights
        biases = self.biases
        input_layer = tf.reshape(x, [-1, 88*200*3])
        fc1 = self.fc(input_layer, weights['w_hidden1'], biases['b_hidden1'])
        fc2 = self.fc(fc1, weights['w_hidden2'], biases['b_hidden2'])
        fc3 = self.fc(fc2, weights['w_hidden3'], biases['b_hidden3'])
        fc3 = tf.nn.dropout(fc3, self.keep_prob)
        out = self.fc(fc3, weights['out'], biases['out'], batch_norm=False, activation=False)
        return out
#########
# Convolutional Neural Network #
#########
class CNN_Model(Model):
    """Two-stage CNN: (5x5 conv -> 2x2 pool) x2 -> FC 1024 -> 256 -> 1."""
    def __init__(self):
        super().__init__()
        self.save_folder = "./models/cnn/"
        # set graph-level random seed
        # tf.set_random_seed(421)
    def initModel(self):
        """Create Xavier-initialized conv filters and FC weights/biases."""
        self.weights = {
            'w_conv1' : tf.get_variable(name="WC1", shape=[5, 5, 3, 32], initializer=tf.contrib.layers.xavier_initializer()),
            'w_conv2' : tf.get_variable(name="WC2", shape=[5, 5, 32, 64], initializer=tf.contrib.layers.xavier_initializer()),
            'w_fc1' : tf.get_variable(name="WD1", shape=[22*50*64, 1024], initializer=tf.contrib.layers.xavier_initializer()),
            'w_fc2' : tf.get_variable(name="WD2", shape=[1024, 256], initializer=tf.contrib.layers.xavier_initializer()),
            'out' : tf.get_variable(name="WOUT", shape=[256, 1], initializer=tf.contrib.layers.xavier_initializer())
        }
        self.biases = {
            'b_conv1': tf.get_variable(name="BC1", shape=[32], initializer=tf.contrib.layers.xavier_initializer()),
            'b_conv2': tf.get_variable(name="BC2", shape=[64], initializer=tf.contrib.layers.xavier_initializer()),
            'b_fc1': tf.get_variable(name="BD1", shape=[1024], initializer=tf.contrib.layers.xavier_initializer()),
            'b_fc2': tf.get_variable(name="BD2", shape=[256], initializer=tf.contrib.layers.xavier_initializer()),
            'out': tf.get_variable(name="BOUT", shape=[1], initializer=tf.contrib.layers.xavier_initializer())
        }
    def net(self, x):
        """Forward pass: two conv+pool stages, flatten, two FC layers + dropout, linear output."""
        weights = self.weights
        biases = self.biases
        input_layer = tf.reshape(x, [-1, 88, 200, 3])
        conv1 = self.conv2d(input_layer, weights['w_conv1'], biases['b_conv1'], strides=1)
        conv1_pool = self.maxpool2d(conv1, k=2)
        conv2 = self.conv2d(conv1_pool, weights['w_conv2'], biases['b_conv2'], strides=1)
        conv2_pool = self.maxpool2d(conv2, k=2)
        # Spatial size 88x200 -> 44x100 -> 22x50 after the two 2x2 pools.
        flattened = tf.reshape(conv2_pool, [-1, 22*50*64])
        fc1 = self.fc(flattened, weights['w_fc1'], biases['b_fc1'])
        fc2 = self.fc(fc1, weights['w_fc2'], biases['b_fc2'])
        fc2 = tf.nn.dropout(fc2, self.keep_prob)
        out = self.fc(fc2, weights['out'], biases['out'], batch_norm=False, activation=False)
        return out
#########
# Residual Neural Network #
#########
class ResNet_Model(Model):
    """CNN with a residual block before each conv+pool stage, then FC head."""
    def __init__(self):
        super().__init__()
        self.save_folder = "./models/resnet/"
        # set graph-level random seed
        # tf.set_random_seed(421)
    def initModel(self):
        """Create Xavier-initialized residual-block, conv, and FC parameters.

        Each residual block expands channels and contracts back (e.g.
        3 -> 32 -> 3) so the skip connection shapes match.
        """
        self.weights = {
            'w_res1_1' : tf.get_variable(name="WR1_1", shape=[5, 5, 3, 32], initializer=tf.contrib.layers.xavier_initializer()),
            'w_res1_2' : tf.get_variable(name="WR1_2", shape=[5, 5, 32, 3], initializer=tf.contrib.layers.xavier_initializer()),
            'w_conv1' : tf.get_variable(name="WC1", shape=[5, 5, 3, 32], initializer=tf.contrib.layers.xavier_initializer()),
            'w_res2_1' : tf.get_variable(name="WR2_1", shape=[5, 5, 32, 64], initializer=tf.contrib.layers.xavier_initializer()),
            'w_res2_2' : tf.get_variable(name="WR2_2", shape=[5, 5, 64, 32], initializer=tf.contrib.layers.xavier_initializer()),
            'w_conv2' : tf.get_variable(name="WC2", shape=[5, 5, 32, 64], initializer=tf.contrib.layers.xavier_initializer()),
            'w_fc1' : tf.get_variable(name="WD1", shape=[22*50*64, 1024], initializer=tf.contrib.layers.xavier_initializer()),
            'w_fc2' : tf.get_variable(name="WD2", shape=[1024, 256], initializer=tf.contrib.layers.xavier_initializer()),
            'out' : tf.get_variable(name="WOUT", shape=[256, 1], initializer=tf.contrib.layers.xavier_initializer())
        }
        self.biases = {
            'b_res1_1': tf.get_variable(name="BR1_1", shape=[32], initializer=tf.contrib.layers.xavier_initializer()),
            'b_res1_2': tf.get_variable(name="BR1_2", shape=[3], initializer=tf.contrib.layers.xavier_initializer()),
            'b_conv1': tf.get_variable(name="BC1", shape=[32], initializer=tf.contrib.layers.xavier_initializer()),
            'b_res2_1': tf.get_variable(name="BR2_1", shape=[64], initializer=tf.contrib.layers.xavier_initializer()),
            'b_res2_2': tf.get_variable(name="BR2_2", shape=[32], initializer=tf.contrib.layers.xavier_initializer()),
            'b_conv2': tf.get_variable(name="BC2", shape=[64], initializer=tf.contrib.layers.xavier_initializer()),
            'b_fc1': tf.get_variable(name="BD1", shape=[1024], initializer=tf.contrib.layers.xavier_initializer()),
            'b_fc2': tf.get_variable(name="BD2", shape=[256], initializer=tf.contrib.layers.xavier_initializer()),
            'out': tf.get_variable(name="BOUT", shape=[1], initializer=tf.contrib.layers.xavier_initializer())
        }
    def net(self, x):
        """Forward pass: (resblock -> conv -> pool) x2, flatten, FC head with dropout."""
        weights = self.weights
        biases = self.biases
        input_layer = tf.reshape(x, [-1, 88, 200, 3])
        res1 = self.resblock(input_layer, weights['w_res1_1'], biases['b_res1_1'], \
            weights['w_res1_2'], biases['b_res1_2'], strides=1)
        conv1 = self.conv2d(res1, weights['w_conv1'], biases['b_conv1'], strides=1)
        conv1_pool = self.maxpool2d(conv1, k=2)
        res2 = self.resblock(conv1_pool, weights['w_res2_1'], biases['b_res2_1'], \
            weights['w_res2_2'], biases['b_res2_2'], strides=1)
        conv2 = self.conv2d(res2, weights['w_conv2'], biases['b_conv2'], strides=1)
        conv2_pool = self.maxpool2d(conv2, k=2)
        # Spatial size 88x200 -> 44x100 -> 22x50 after the two 2x2 pools.
        flattened = tf.reshape(conv2_pool, [-1, 22*50*64])
        fc1 = self.fc(flattened, weights['w_fc1'], biases['b_fc1'])
        fc2 = self.fc(fc1, weights['w_fc2'], biases['b_fc2'])
        fc2 = tf.nn.dropout(fc2, self.keep_prob)
        out = self.fc(fc2, weights['out'], biases['out'], batch_norm=False, activation=False)
        return out
| [
"tensorflow.contrib.layers.xavier_initializer",
"data.shuffle",
"tensorflow.get_collection",
"tensorflow.reset_default_graph",
"tensorflow.reshape",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.matmul",
"tensorflow.train.latest_checkpoint",
"tensorflow.nn.conv2d",
"tensorflow.lay... | [((209, 233), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (231, 233), True, 'import tensorflow as tf\n'), ((254, 300), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 88, 200, 3]'], {}), '(tf.float32, [None, 88, 200, 3])\n', (268, 300), True, 'import tensorflow as tf\n'), ((319, 356), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 1]'], {}), '(tf.float32, [None, 1])\n', (333, 356), True, 'import tensorflow as tf\n'), ((383, 425), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(1.0)'], {'shape': '()'}), '(1.0, shape=())\n', (410, 425), True, 'import tensorflow as tf\n'), ((451, 494), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {'name': '"""is_training"""'}), "(tf.bool, name='is_training')\n", (465, 494), True, 'import tensorflow as tf\n'), ((574, 590), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (588, 590), True, 'import tensorflow as tf\n'), ((750, 776), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['self.logits'], {}), '(self.logits)\n', (763, 776), True, 'import tensorflow as tf\n'), ((1270, 1338), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W'], {'strides': '[1, strides, strides, 1]', 'padding': '"""SAME"""'}), "(x, W, strides=[1, strides, strides, 1], padding='SAME')\n", (1282, 1338), True, 'import tensorflow as tf\n'), ((1352, 1372), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['x', 'b'], {}), '(x, b)\n', (1366, 1372), True, 'import tensorflow as tf\n'), ((1484, 1497), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (1494, 1497), True, 'import tensorflow as tf\n'), ((1566, 1641), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, k, k, 1]', 'strides': '[1, k, k, 1]', 'padding': '"""SAME"""'}), "(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')\n", (1580, 1641), True, 'import tensorflow as tf\n'), ((2005, 2074), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W1'], 
{'strides': '[1, strides, strides, 1]', 'padding': '"""SAME"""'}), "(x, W1, strides=[1, strides, strides, 1], padding='SAME')\n", (2017, 2074), True, 'import tensorflow as tf\n'), ((2088, 2109), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['f', 'b1'], {}), '(f, b1)\n', (2102, 2109), True, 'import tensorflow as tf\n'), ((2221, 2234), 'tensorflow.nn.relu', 'tf.nn.relu', (['f'], {}), '(f)\n', (2231, 2234), True, 'import tensorflow as tf\n'), ((2258, 2327), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['f', 'W2'], {'strides': '[1, strides, strides, 1]', 'padding': '"""SAME"""'}), "(f, W2, strides=[1, strides, strides, 1], padding='SAME')\n", (2270, 2327), True, 'import tensorflow as tf\n'), ((2341, 2362), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['f', 'b2'], {}), '(f, b2)\n', (2355, 2362), True, 'import tensorflow as tf\n'), ((2495, 2508), 'tensorflow.nn.relu', 'tf.nn.relu', (['f'], {}), '(f)\n', (2505, 2508), True, 'import tensorflow as tf\n'), ((2941, 2992), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (2963, 2992), True, 'import tensorflow as tf\n'), ((3020, 3062), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS'], {}), '(tf.GraphKeys.UPDATE_OPS)\n', (3037, 3062), True, 'import tensorflow as tf\n'), ((3151, 3193), 'tensorflow.group', 'tf.group', (['[self.train_op, self.update_ops]'], {}), '([self.train_op, self.update_ops])\n', (3159, 3193), True, 'import tensorflow as tf\n'), ((3292, 3331), 'math.ceil', 'math.ceil', (['(total_num_input / batch_size)'], {}), '(total_num_input / batch_size)\n', (3301, 3331), False, 'import math\n'), ((6756, 6795), 'math.ceil', 'math.ceil', (['(total_num_input / batch_size)'], {}), '(total_num_input / batch_size)\n', (6765, 6795), False, 'import math\n'), ((7603, 7638), 'numpy.concatenate', 'np.concatenate', (['predictions'], {'axis': '(0)'}), '(predictions, axis=0)\n', (7617, 7638), True, 'import numpy as np\n'), 
((9092, 9125), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, 88 * 200 * 3]'], {}), '(x, [-1, 88 * 200 * 3])\n', (9102, 9125), True, 'import tensorflow as tf\n'), ((9360, 9394), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['fc3', 'self.keep_prob'], {}), '(fc3, self.keep_prob)\n', (9373, 9394), True, 'import tensorflow as tf\n'), ((11206, 11237), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, 88, 200, 3]'], {}), '(x, [-1, 88, 200, 3])\n', (11216, 11237), True, 'import tensorflow as tf\n'), ((11546, 11588), 'tensorflow.reshape', 'tf.reshape', (['conv2_pool', '[-1, 22 * 50 * 64]'], {}), '(conv2_pool, [-1, 22 * 50 * 64])\n', (11556, 11588), True, 'import tensorflow as tf\n'), ((11732, 11766), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['fc2', 'self.keep_prob'], {}), '(fc2, self.keep_prob)\n', (11745, 11766), True, 'import tensorflow as tf\n'), ((14590, 14621), 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, 88, 200, 3]'], {}), '(x, [-1, 88, 200, 3])\n', (14600, 14621), True, 'import tensorflow as tf\n'), ((15250, 15292), 'tensorflow.reshape', 'tf.reshape', (['conv2_pool', '[-1, 22 * 50 * 64]'], {}), '(conv2_pool, [-1, 22 * 50 * 64])\n', (15260, 15292), True, 'import tensorflow as tf\n'), ((15436, 15470), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['fc2', 'self.keep_prob'], {}), '(fc2, self.keep_prob)\n', (15449, 15470), True, 'import tensorflow as tf\n'), ((817, 842), 'tensorflow.round', 'tf.round', (['self.prediction'], {}), '(self.prediction)\n', (825, 842), True, 'import tensorflow as tf\n'), ((892, 930), 'tensorflow.cast', 'tf.cast', (['self.correct_pred', 'tf.float32'], {}), '(self.correct_pred, tf.float32)\n', (899, 930), True, 'import tensorflow as tf\n'), ((970, 1044), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'logits': 'self.logits', 'labels': 'self.Y'}), '(logits=self.logits, labels=self.Y)\n', (1009, 1044), True, 'import tensorflow as tf\n'), ((1414, 1470), 'tensorflow.layers.batch_normalization', 
'tf.layers.batch_normalization', (['x'], {'training': 'self.training'}), '(x, training=self.training)\n', (1443, 1470), True, 'import tensorflow as tf\n'), ((1719, 1734), 'tensorflow.matmul', 'tf.matmul', (['x', 'W'], {}), '(x, W)\n', (1728, 1734), True, 'import tensorflow as tf\n'), ((1780, 1836), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['x'], {'training': 'self.training'}), '(x, training=self.training)\n', (1809, 1836), True, 'import tensorflow as tf\n'), ((1878, 1891), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (1888, 1891), True, 'import tensorflow as tf\n'), ((2151, 2207), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['f'], {'training': 'self.training'}), '(f, training=self.training)\n', (2180, 2207), True, 'import tensorflow as tf\n'), ((2404, 2460), 'tensorflow.layers.batch_normalization', 'tf.layers.batch_normalization', (['f'], {'training': 'self.training'}), '(f, training=self.training)\n', (2433, 2460), True, 'import tensorflow as tf\n'), ((3472, 3484), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3482, 3484), True, 'import tensorflow as tf\n'), ((3507, 3542), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['self.save_folder'], {}), '(self.save_folder)\n', (3524, 3542), True, 'import tensorflow as tf\n'), ((6187, 6199), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (6197, 6199), True, 'import tensorflow as tf\n'), ((6242, 6286), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['self.save_folder'], {}), '(self.save_folder)\n', (6268, 6286), True, 'import tensorflow as tf\n'), ((6370, 6416), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (["(lastckpt + '.meta')"], {}), "(lastckpt + '.meta')\n", (6396, 6416), True, 'import tensorflow as tf\n'), ((1084, 1100), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['w'], {}), '(w)\n', (1097, 1100), True, 'import tensorflow as tf\n'), ((3602, 3646), 
'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['self.save_folder'], {}), '(self.save_folder)\n', (3628, 3646), True, 'import tensorflow as tf\n'), ((3738, 3784), 'tensorflow.train.import_meta_graph', 'tf.train.import_meta_graph', (["(lastckpt + '.meta')"], {}), "(lastckpt + '.meta')\n", (3764, 3784), True, 'import tensorflow as tf\n'), ((3995, 4027), 'data.shuffle', 'shuffle', (['trainInput', 'trainTarget'], {}), '(trainInput, trainTarget)\n', (4002, 4027), False, 'from data import shuffle\n'), ((4997, 5041), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['trainTarget', 'train_prediction'], {}), '(trainTarget, train_prediction)\n', (5010, 5041), False, 'from sklearn.metrics import roc_auc_score\n'), ((5070, 5112), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['testTarget', 'test_prediction'], {}), '(testTarget, test_prediction)\n', (5083, 5112), False, 'from sklearn.metrics import roc_auc_score\n'), ((3880, 3913), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3911, 3913), True, 'import tensorflow as tf\n'), ((8043, 8081), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (8079, 8081), True, 'import tensorflow as tf\n'), ((8171, 8209), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (8207, 8209), True, 'import tensorflow as tf\n'), ((8298, 8336), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (8334, 8336), True, 'import tensorflow as tf\n'), ((8417, 8455), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (8453, 8455), True, 'import tensorflow as tf\n'), ((8575, 8613), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (8611, 8613), True, 'import tensorflow as tf\n'), ((8696, 8734), 
'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (8732, 8734), True, 'import tensorflow as tf\n'), ((8816, 8854), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (8852, 8854), True, 'import tensorflow as tf\n'), ((8929, 8967), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (8965, 8967), True, 'import tensorflow as tf\n'), ((9928, 9966), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (9964, 9966), True, 'import tensorflow as tf\n'), ((10056, 10094), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (10092, 10094), True, 'import tensorflow as tf\n'), ((10184, 10222), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (10220, 10222), True, 'import tensorflow as tf\n'), ((10307, 10345), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (10343, 10345), True, 'import tensorflow as tf\n'), ((10426, 10464), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (10462, 10464), True, 'import tensorflow as tf\n'), ((10580, 10618), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (10616, 10618), True, 'import tensorflow as tf\n'), ((10697, 10735), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (10733, 10735), True, 'import tensorflow as tf\n'), ((10814, 10852), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (10850, 10852), True, 'import tensorflow as tf\n'), ((10930, 10968), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', 
([], {}), '()\n', (10966, 10968), True, 'import tensorflow as tf\n'), ((11043, 11081), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (11079, 11081), True, 'import tensorflow as tf\n'), ((12302, 12340), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (12338, 12340), True, 'import tensorflow as tf\n'), ((12432, 12470), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (12468, 12470), True, 'import tensorflow as tf\n'), ((12559, 12597), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (12595, 12597), True, 'import tensorflow as tf\n'), ((12690, 12728), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (12726, 12728), True, 'import tensorflow as tf\n'), ((12821, 12859), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (12857, 12859), True, 'import tensorflow as tf\n'), ((12949, 12987), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (12985, 12987), True, 'import tensorflow as tf\n'), ((13077, 13115), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (13113, 13115), True, 'import tensorflow as tf\n'), ((13200, 13238), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (13236, 13238), True, 'import tensorflow as tf\n'), ((13319, 13357), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (13355, 13357), True, 'import tensorflow as tf\n'), ((13476, 13514), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (13512, 13514), True, 'import tensorflow as tf\n'), 
((13595, 13633), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (13631, 13633), True, 'import tensorflow as tf\n'), ((13712, 13750), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (13748, 13750), True, 'import tensorflow as tf\n'), ((13844, 13882), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (13880, 13882), True, 'import tensorflow as tf\n'), ((13964, 14002), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (14000, 14002), True, 'import tensorflow as tf\n'), ((14081, 14119), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (14117, 14119), True, 'import tensorflow as tf\n'), ((14198, 14236), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (14234, 14236), True, 'import tensorflow as tf\n'), ((14314, 14352), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (14350, 14352), True, 'import tensorflow as tf\n'), ((14427, 14465), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (14463, 14465), True, 'import tensorflow as tf\n')] |
import numpy as np
from scipy.optimize import root_scalar
from scipy.linalg import schur
from solovay_kitaev.gates.paulis import *
def dag(matrix : np.ndarray):
'''
dag
Performs a Hermitean conjugate (i.e. conjugate transpose) on the input matrix. Simply returns matrix.conj().T.
:: matrix : np.ndarray :: Input array; presumed to be a square matrix.
'''
return matrix.conj().T
def unitary_phase(phi):
'''
unitary_phase
Returns the solution to Eq. (10) from [DN05] <NAME> Nielsen (2005) -- arXiv:quant-ph/0505030.
The group commutator produced by solovay_kitaev.gc_decompose can be written as a rotation about some axis by an angle ('theta'). The purpose of this function is to calculate 'theta'. The value of 'theta' depends on the angle ('phi') of rotation about the X and Y axes used as intermediate values for the two unitary operators that, following a basis change, comprise the output of solovay_kitaev.gc_decompose.
:: phi :: Input angle (or numpy.ndarray of angles) that is assumed to be between 0 and pi/2.
'''
# [DN05, Eq. (10)]
return 2 * np.arcsin(
2 * (np.sin(phi/2) ** 2) * np.sqrt(
1 - np.sin(phi/2) ** 4
)
)
def invert_unitary_phase(theta):
'''
invert_unitary_phase
Numerically inverts the function solovay_kitaev.unitary_phase. This is needed because we are *given* a unitary phase and we must *calculate* an input that yields that given unitary phase.
This function is implemented using scipy.root_scalar. The current implementation is quite hacky and so it is NOT VECTORISED.
:: theta :: Input angle presumed to be the rotation angle for the input unitary of solovay_kitaev.gc_decompose.
'''
# Inversion of the theta function
# warning: this function is NOT VECTORISED
def fn(phi):
return unitary_phase(phi) - theta
solution = root_scalar(fn, bracket=[0, np.pi/2])
return solution.root
def gc_decompose(unitary, determinant_error=1e-6):
'''
gc_decompose
Implementation of the group commutator decomposition method explained in Section 4.1 of [DN05] Dawson and Nielsen (2005) -- arXiv:quant-ph/0505030. Returns the pair of unitary operators defined in [DN05, Eq. (11)].
:: unitary :: Unitary operator to be decomposed. Because of how this unitary operator is processed, the input MUST have determinant 1.
:: determinant_error :: Error tolerance used when checking that the determinant of the input unitary is 1.
'''
# gc_decompose requires an input with determinant 1.
assert(np.abs(np.linalg.det(unitary) - 1) < determinant_error)
eigen_values, _ = np.linalg.eig(unitary)
coefficient_of_identity = np.real(eigen_values[0])
output_phase = invert_unitary_phase(2 * np.arccos(coefficient_of_identity))
left_transform = np.cos(output_phase / 2) * identity - 1j * np.sin(output_phase / 2) * pauli_x
right_transform = np.cos(output_phase / 2) * identity - 1j * np.sin(output_phase / 2) * pauli_y
group_commutator = left_transform @ right_transform @ dag(left_transform) @ dag(right_transform)
_, schur_unitary = schur(unitary)
_, schur_group_commutator = schur(group_commutator)
similarity_transform = dag(schur_group_commutator) @ schur_unitary
left_transform = similarity_transform @ left_transform @ dag(similarity_transform)
right_transform = similarity_transform @ right_transform @ dag(similarity_transform)
return left_transform, right_transform
| [
"scipy.optimize.root_scalar",
"numpy.linalg.eig",
"numpy.sin",
"numpy.real",
"numpy.cos",
"scipy.linalg.schur",
"numpy.linalg.det",
"numpy.arccos"
] | [((1936, 1975), 'scipy.optimize.root_scalar', 'root_scalar', (['fn'], {'bracket': '[0, np.pi / 2]'}), '(fn, bracket=[0, np.pi / 2])\n', (1947, 1975), False, 'from scipy.optimize import root_scalar\n'), ((2729, 2751), 'numpy.linalg.eig', 'np.linalg.eig', (['unitary'], {}), '(unitary)\n', (2742, 2751), True, 'import numpy as np\n'), ((2782, 2806), 'numpy.real', 'np.real', (['eigen_values[0]'], {}), '(eigen_values[0])\n', (2789, 2806), True, 'import numpy as np\n'), ((3217, 3231), 'scipy.linalg.schur', 'schur', (['unitary'], {}), '(unitary)\n', (3222, 3231), False, 'from scipy.linalg import schur\n'), ((3264, 3287), 'scipy.linalg.schur', 'schur', (['group_commutator'], {}), '(group_commutator)\n', (3269, 3287), False, 'from scipy.linalg import schur\n'), ((2851, 2885), 'numpy.arccos', 'np.arccos', (['coefficient_of_identity'], {}), '(coefficient_of_identity)\n', (2860, 2885), True, 'import numpy as np\n'), ((2913, 2937), 'numpy.cos', 'np.cos', (['(output_phase / 2)'], {}), '(output_phase / 2)\n', (2919, 2937), True, 'import numpy as np\n'), ((3013, 3037), 'numpy.cos', 'np.cos', (['(output_phase / 2)'], {}), '(output_phase / 2)\n', (3019, 3037), True, 'import numpy as np\n'), ((2657, 2679), 'numpy.linalg.det', 'np.linalg.det', (['unitary'], {}), '(unitary)\n', (2670, 2679), True, 'import numpy as np\n'), ((2956, 2980), 'numpy.sin', 'np.sin', (['(output_phase / 2)'], {}), '(output_phase / 2)\n', (2962, 2980), True, 'import numpy as np\n'), ((3056, 3080), 'numpy.sin', 'np.sin', (['(output_phase / 2)'], {}), '(output_phase / 2)\n', (3062, 3080), True, 'import numpy as np\n'), ((1165, 1180), 'numpy.sin', 'np.sin', (['(phi / 2)'], {}), '(phi / 2)\n', (1171, 1180), True, 'import numpy as np\n'), ((1212, 1227), 'numpy.sin', 'np.sin', (['(phi / 2)'], {}), '(phi / 2)\n', (1218, 1227), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import sys
import numpy
from utils import *
class RBM(object):
def __init__(self, input=None, n_visible=2, n_hidden=3, \
W=None, hbias=None, vbias=None, rng=None):
self.n_visible = n_visible # num of units in visible (input) layer
self.n_hidden = n_hidden # num of units in hidden layer
if rng is None:
rng = numpy.random.RandomState(1234)
if W is None:
a = 1. / n_visible
initial_W = numpy.array(rng.uniform( # initialize W uniformly
low=-a,
high=a,
size=(n_visible, n_hidden)))
W = initial_W
if hbias is None:
hbias = numpy.zeros(n_hidden) # initialize h bias 0
if vbias is None:
vbias = numpy.zeros(n_visible) # initialize v bias 0
self.rng = rng
self.input = input
self.W = W
self.hbias = hbias
self.vbias = vbias
def contrastive_divergence(self, lr=0.1, k=1, input=None):
if input is not None:
self.input = input
''' CD-k '''
ph_mean, ph_sample = self.sample_h_given_v(self.input)
chain_start = ph_sample
for step in range(k):
if step == 0:
nv_means, nv_samples,\
nh_means, nh_samples = self.gibbs_hvh(chain_start)
else:
nv_means, nv_samples,\
nh_means, nh_samples = self.gibbs_hvh(nh_samples)
# chain_end = nv_samples
self.W += lr * (numpy.dot(self.input.T, ph_mean)
- numpy.dot(nv_samples.T, nh_means))
self.vbias += lr * numpy.mean(self.input - nv_samples, axis=0)
self.hbias += lr * numpy.mean(ph_mean - nh_means, axis=0)
# cost = self.get_reconstruction_cross_entropy()
# return cost
def sample_h_given_v(self, v0_sample):
h1_mean = self.propup(v0_sample)
h1_sample = self.rng.binomial(size=h1_mean.shape, # discrete: binomial
n=1,
p=h1_mean)
return [h1_mean, h1_sample]
def sample_v_given_h(self, h0_sample):
v1_mean = self.propdown(h0_sample)
v1_sample = self.rng.binomial(size=v1_mean.shape, # discrete: binomial
n=1,
p=v1_mean)
return [v1_mean, v1_sample]
def propup(self, v):
pre_sigmoid_activation = numpy.dot(v, self.W) + self.hbias
return sigmoid(pre_sigmoid_activation)
def propdown(self, h):
pre_sigmoid_activation = numpy.dot(h, self.W.T) + self.vbias
return sigmoid(pre_sigmoid_activation)
def gibbs_hvh(self, h0_sample):
v1_mean, v1_sample = self.sample_v_given_h(h0_sample)
h1_mean, h1_sample = self.sample_h_given_v(v1_sample)
return [v1_mean, v1_sample,
h1_mean, h1_sample]
def get_reconstruction_cross_entropy(self):
pre_sigmoid_activation_h = numpy.dot(self.input, self.W) + self.hbias
sigmoid_activation_h = sigmoid(pre_sigmoid_activation_h)
pre_sigmoid_activation_v = numpy.dot(sigmoid_activation_h, self.W.T) + self.vbias
sigmoid_activation_v = sigmoid(pre_sigmoid_activation_v)
cross_entropy = - numpy.mean(
numpy.sum(self.input * numpy.log(sigmoid_activation_v) +
(1 - self.input) * numpy.log(1 - sigmoid_activation_v),
axis=1))
return cross_entropy
def reconstruct(self, v):
h = sigmoid(numpy.dot(v, self.W) + self.hbias)
reconstructed_v = sigmoid(numpy.dot(h, self.W.T) + self.vbias)
return reconstructed_v
def test_rbm(learning_rate=0.1, k=1, training_epochs=1000):
data = numpy.array([[1,1,1,0,0,0],
[1,0,1,0,0,0],
[1,1,1,0,0,0],
[0,0,1,1,1,0],
[0,0,1,1,0,0],
[0,0,1,1,1,0]])
rng = numpy.random.RandomState(123)
# construct RBM
rbm = RBM(input=data, n_visible=6, n_hidden=2, rng=rng)
# train
for epoch in range(training_epochs):
rbm.contrastive_divergence(lr=learning_rate, k=k)
# cost = rbm.get_reconstruction_cross_entropy()
# print >> sys.stderr, 'Training epoch %d, cost is ' % epoch, cost
# test
v = numpy.array([[1, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0]])
print (rbm.reconstruct(v))
if __name__ == "__main__":
test_rbm()
| [
"numpy.log",
"numpy.zeros",
"numpy.random.RandomState",
"numpy.mean",
"numpy.array",
"numpy.dot"
] | [((3903, 4040), 'numpy.array', 'numpy.array', (['[[1, 1, 1, 0, 0, 0], [1, 0, 1, 0, 0, 0], [1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1,\n 0], [0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0]]'], {}), '([[1, 1, 1, 0, 0, 0], [1, 0, 1, 0, 0, 0], [1, 1, 1, 0, 0, 0], [0,\n 0, 1, 1, 1, 0], [0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0]])\n', (3914, 4040), False, 'import numpy\n'), ((4139, 4168), 'numpy.random.RandomState', 'numpy.random.RandomState', (['(123)'], {}), '(123)\n', (4163, 4168), False, 'import numpy\n'), ((4514, 4567), 'numpy.array', 'numpy.array', (['[[1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0]]'], {}), '([[1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 1, 0]])\n', (4525, 4567), False, 'import numpy\n'), ((397, 427), 'numpy.random.RandomState', 'numpy.random.RandomState', (['(1234)'], {}), '(1234)\n', (421, 427), False, 'import numpy\n'), ((725, 746), 'numpy.zeros', 'numpy.zeros', (['n_hidden'], {}), '(n_hidden)\n', (736, 746), False, 'import numpy\n'), ((817, 839), 'numpy.zeros', 'numpy.zeros', (['n_visible'], {}), '(n_visible)\n', (828, 839), False, 'import numpy\n'), ((1707, 1750), 'numpy.mean', 'numpy.mean', (['(self.input - nv_samples)'], {'axis': '(0)'}), '(self.input - nv_samples, axis=0)\n', (1717, 1750), False, 'import numpy\n'), ((1778, 1816), 'numpy.mean', 'numpy.mean', (['(ph_mean - nh_means)'], {'axis': '(0)'}), '(ph_mean - nh_means, axis=0)\n', (1788, 1816), False, 'import numpy\n'), ((2572, 2592), 'numpy.dot', 'numpy.dot', (['v', 'self.W'], {}), '(v, self.W)\n', (2581, 2592), False, 'import numpy\n'), ((2714, 2736), 'numpy.dot', 'numpy.dot', (['h', 'self.W.T'], {}), '(h, self.W.T)\n', (2723, 2736), False, 'import numpy\n'), ((3121, 3150), 'numpy.dot', 'numpy.dot', (['self.input', 'self.W'], {}), '(self.input, self.W)\n', (3130, 3150), False, 'import numpy\n'), ((3273, 3314), 'numpy.dot', 'numpy.dot', (['sigmoid_activation_h', 'self.W.T'], {}), '(sigmoid_activation_h, self.W.T)\n', (3282, 3314), False, 'import numpy\n'), ((1586, 1618), 'numpy.dot', 'numpy.dot', (['self.input.T', 
'ph_mean'], {}), '(self.input.T, ph_mean)\n', (1595, 1618), False, 'import numpy\n'), ((1645, 1678), 'numpy.dot', 'numpy.dot', (['nv_samples.T', 'nh_means'], {}), '(nv_samples.T, nh_means)\n', (1654, 1678), False, 'import numpy\n'), ((3690, 3710), 'numpy.dot', 'numpy.dot', (['v', 'self.W'], {}), '(v, self.W)\n', (3699, 3710), False, 'import numpy\n'), ((3759, 3781), 'numpy.dot', 'numpy.dot', (['h', 'self.W.T'], {}), '(h, self.W.T)\n', (3768, 3781), False, 'import numpy\n'), ((3468, 3499), 'numpy.log', 'numpy.log', (['sigmoid_activation_v'], {}), '(sigmoid_activation_v)\n', (3477, 3499), False, 'import numpy\n'), ((3533, 3568), 'numpy.log', 'numpy.log', (['(1 - sigmoid_activation_v)'], {}), '(1 - sigmoid_activation_v)\n', (3542, 3568), False, 'import numpy\n')] |
"""
Authors: <NAME>, <NAME>
E-mail: <EMAIL>, <EMAIL>
Course: Mashinski vid, FEEIT, Spring 2022
Date: 01.03.2022
Description: design, train, evaluate and apply a fully connected neural network for multi-class image classification
Python version: 3.6
"""
# python imports
import os
import cv2
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from keras.utils import to_categorical, plot_model
from keras.callbacks import ModelCheckpoint
from keras.losses import categorical_crossentropy
from keras.optimizers import Adam
# custom package imports
from Helpers_Classification import helper_model
from Helpers_Classification import helper_data
from Helpers_Classification import helper_stats
# --- paths ---
version = 'LV1_v3'
# NOTE: specify destination paths
srcPath = r'C:\Users\vase_\Downloads\ComputerVision\Data\Minst'
dstResultsPath = r'C:\Users\vase_\Downloads\ComputerVision\Data\Results'
dstModelsPath = r'C:\Users\vase_\Downloads\ComputerVision\Data\Models'
# create folders to save data from the current execution
if not os.path.exists(os.path.join(dstResultsPath, version)):
os.mkdir(os.path.join(dstResultsPath, version))
else:
# to avoid overwriting training results
print(f"Folder name {version} exists.")
exit(1)
resultsPath = os.path.join(dstResultsPath, version)
if not os.path.exists(os.path.join(dstModelsPath, version)):
os.mkdir(os.path.join(dstModelsPath, version))
modelsPath = os.path.join(dstModelsPath, version)
# --- variables ---
imgDims = {'rows': 28, 'cols': 28}
num_classes = 10
image_depth = 1
num_samples_to_load = 100 # how many samples to load from each class, value None loads all available samples
# optimization hyperprameters
batch_size = 128
epochs = 10
lr = 0.0001
# --- load and format data ---
# load full dataset into memory - image data and labels
x_train, y_train = helper_data.read_images(os.path.join(srcPath, 'train'), num_samples_to_load, image_depth)
x_test, y_test = helper_data.read_images(os.path.join(srcPath, 'test'), None, image_depth)
print(f'Training dataset shape: {x_train.shape}')
print(f'Number of training samples: {x_train.shape[0]}')
print(f'Number of test samples: {x_test.shape[0]}')
# one-hot encoding of labels
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)
# create validation dataset (image and label data is shuffled in both datasets)
X_train, X_val, Y_train, Y_val = train_test_split(x_train, y_train,
test_size=0.2, # assign random 20% of the samples to the validation set
random_state=42) # fixed random seed enables repeatability of sample choice across executions
# --- construct model ---
model = helper_model.construct_model_cnn(num_classes) # build model architecture
# compile model
model.compile(loss=categorical_crossentropy, # categorical crossentropy for multi-class classification
optimizer=Adam(lr=lr),
metrics=['accuracy'])
# SGD(lr=lr, momentum=0.0, decay=0.0)
# --- fit model ---
model_checkpoint = ModelCheckpoint(filepath=os.path.join(modelsPath, 'checkpoint-{epoch:03d}-{val_accuracy:.4f}.hdf5'), # epoch number and val accuracy will be part of the weight file name
monitor='val_accuracy', # metric to monitor when selecting weight checkpoints to save
verbose=1,
save_best_only=True) # True saves only the weights after epochs where the monitored value (val accuracy) is improved
history = model.fit(X_train, Y_train,
batch_size=batch_size, # number of samples to process before updating the weights
epochs=epochs,
callbacks=[model_checkpoint],
verbose=1,
validation_data=(X_val, Y_val))
# --- save model ---
# save model architecture
print(model.summary()) # parameter info for each layer
with open(os.path.join(modelsPath, 'modelSummary.txt'), 'w') as fh: # save model summary
model.summary(print_fn=lambda x: fh.write(x + '\n'))
plot_model(model, to_file=os.path.join(modelsPath, 'modelDiagram.png'), show_shapes=True) # save diagram of model architecture
# save model configuration and weights
model_json = model.to_json() # serialize model architecture to JSON
with open(os.path.join(os.path.join(modelsPath, 'model.json')), "w") as json_file:
json_file.write(model_json)
model.save_weights(os.path.join(modelsPath, 'model.h5')) # serialize weights to HDF5
print("Saved model to disk.")
# --- save training curves and logs ---
helper_stats.save_training_logs(history=history, dst_path=modelsPath)
# --- apply model to test data ---
Y_test_pred = model.predict(x_test, verbose=1)
# --- evaluate model ---
# accuracy
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# confusion matrix
labels = [x for x in range(10)]
print(labels)
# convert one-hot encoded vectors to 1D list of classes
y_test_list = np.argmax(y_test, axis=1)
Y_test_pred_list = np.argmax(Y_test_pred, axis=1)
cm = confusion_matrix(y_test_list, Y_test_pred_list, labels) # takes 1D list of classes as input
print(cm)
# plot confusion matrix
target_names = [str(x) for x in labels]
fig = helper_stats.plot_confusion_matrix(cm, target_names, title='Confusion matrix', cmap=None, normalize=False)
fig.savefig(os.path.join(modelsPath, 'confusionMatrix.png'), dpi=fig.dpi) # save confusion matrix as figure
# --- save misclassified test samples ---
# find indices of misclassified samples
missed = [ind for ind, elem in enumerate(Y_test_pred_list) if elem != y_test_list[ind]]
for i in missed:
cv2.imwrite(os.path.join(resultsPath, str(i).zfill(6) + '_' + str(Y_test_pred_list[i]) + '_' + str(y_test_list[i]) + '.png'),
(x_test[i] * 255).astype(np.uint8)) # transform value range inback to [0, 255]
# file name: OrdinalNumberOfSample_PredictedClass_TrueClass.png
| [
"numpy.argmax",
"sklearn.model_selection.train_test_split",
"keras.optimizers.Adam",
"Helpers_Classification.helper_stats.plot_confusion_matrix",
"Helpers_Classification.helper_model.construct_model_cnn",
"sklearn.metrics.confusion_matrix",
"os.path.join",
"Helpers_Classification.helper_stats.save_tra... | [((1333, 1370), 'os.path.join', 'os.path.join', (['dstResultsPath', 'version'], {}), '(dstResultsPath, version)\n', (1345, 1370), False, 'import os\n'), ((1497, 1533), 'os.path.join', 'os.path.join', (['dstModelsPath', 'version'], {}), '(dstModelsPath, version)\n', (1509, 1533), False, 'import os\n'), ((2297, 2333), 'keras.utils.to_categorical', 'to_categorical', (['y_train', 'num_classes'], {}), '(y_train, num_classes)\n', (2311, 2333), False, 'from keras.utils import to_categorical, plot_model\n'), ((2343, 2378), 'keras.utils.to_categorical', 'to_categorical', (['y_test', 'num_classes'], {}), '(y_test, num_classes)\n', (2357, 2378), False, 'from keras.utils import to_categorical, plot_model\n'), ((2493, 2559), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_train', 'y_train'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(x_train, y_train, test_size=0.2, random_state=42)\n', (2509, 2559), False, 'from sklearn.model_selection import train_test_split\n'), ((2837, 2882), 'Helpers_Classification.helper_model.construct_model_cnn', 'helper_model.construct_model_cnn', (['num_classes'], {}), '(num_classes)\n', (2869, 2882), False, 'from Helpers_Classification import helper_model\n'), ((4766, 4835), 'Helpers_Classification.helper_stats.save_training_logs', 'helper_stats.save_training_logs', ([], {'history': 'history', 'dst_path': 'modelsPath'}), '(history=history, dst_path=modelsPath)\n', (4797, 4835), False, 'from Helpers_Classification import helper_stats\n'), ((5209, 5234), 'numpy.argmax', 'np.argmax', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (5218, 5234), True, 'import numpy as np\n'), ((5254, 5284), 'numpy.argmax', 'np.argmax', (['Y_test_pred'], {'axis': '(1)'}), '(Y_test_pred, axis=1)\n', (5263, 5284), True, 'import numpy as np\n'), ((5291, 5346), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test_list', 'Y_test_pred_list', 'labels'], {}), '(y_test_list, 
Y_test_pred_list, labels)\n', (5307, 5346), False, 'from sklearn.metrics import confusion_matrix\n'), ((5467, 5578), 'Helpers_Classification.helper_stats.plot_confusion_matrix', 'helper_stats.plot_confusion_matrix', (['cm', 'target_names'], {'title': '"""Confusion matrix"""', 'cmap': 'None', 'normalize': '(False)'}), "(cm, target_names, title=\n 'Confusion matrix', cmap=None, normalize=False)\n", (5501, 5578), False, 'from Helpers_Classification import helper_stats\n'), ((1940, 1970), 'os.path.join', 'os.path.join', (['srcPath', '"""train"""'], {}), "(srcPath, 'train')\n", (1952, 1970), False, 'import os\n'), ((2047, 2076), 'os.path.join', 'os.path.join', (['srcPath', '"""test"""'], {}), "(srcPath, 'test')\n", (2059, 2076), False, 'import os\n'), ((4627, 4663), 'os.path.join', 'os.path.join', (['modelsPath', '"""model.h5"""'], {}), "(modelsPath, 'model.h5')\n", (4639, 4663), False, 'import os\n'), ((5586, 5633), 'os.path.join', 'os.path.join', (['modelsPath', '"""confusionMatrix.png"""'], {}), "(modelsPath, 'confusionMatrix.png')\n", (5598, 5633), False, 'import os\n'), ((1120, 1157), 'os.path.join', 'os.path.join', (['dstResultsPath', 'version'], {}), '(dstResultsPath, version)\n', (1132, 1157), False, 'import os\n'), ((1173, 1210), 'os.path.join', 'os.path.join', (['dstResultsPath', 'version'], {}), '(dstResultsPath, version)\n', (1185, 1210), False, 'import os\n'), ((1394, 1430), 'os.path.join', 'os.path.join', (['dstModelsPath', 'version'], {}), '(dstModelsPath, version)\n', (1406, 1430), False, 'import os\n'), ((1446, 1482), 'os.path.join', 'os.path.join', (['dstModelsPath', 'version'], {}), '(dstModelsPath, version)\n', (1458, 1482), False, 'import os\n'), ((3059, 3070), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'lr'}), '(lr=lr)\n', (3063, 3070), False, 'from keras.optimizers import Adam\n'), ((3211, 3285), 'os.path.join', 'os.path.join', (['modelsPath', '"""checkpoint-{epoch:03d}-{val_accuracy:.4f}.hdf5"""'], {}), "(modelsPath, 
'checkpoint-{epoch:03d}-{val_accuracy:.4f}.hdf5')\n", (3223, 3285), False, 'import os\n'), ((4115, 4159), 'os.path.join', 'os.path.join', (['modelsPath', '"""modelSummary.txt"""'], {}), "(modelsPath, 'modelSummary.txt')\n", (4127, 4159), False, 'import os\n'), ((4281, 4325), 'os.path.join', 'os.path.join', (['modelsPath', '"""modelDiagram.png"""'], {}), "(modelsPath, 'modelDiagram.png')\n", (4293, 4325), False, 'import os\n'), ((4516, 4554), 'os.path.join', 'os.path.join', (['modelsPath', '"""model.json"""'], {}), "(modelsPath, 'model.json')\n", (4528, 4554), False, 'import os\n')] |
import numpy
from matplotlib import pyplot
from enyo.etc import spectrographs
tmtb = spectrographs.TMTWFOSBlueOpticalModel()
test_img = numpy.zeros((100,50), dtype=float)
wave0 = 3110.
pixelscale = 0.05153458543289052
dispscale = 15 #0.1995
test_img[20,:] = 1
test_img[60,:] = 1
test_img[:,10] = 1
test_img[:,30] = 1
#pyplot.imshow(test_img, origin='lower', interpolation='nearest', aspect='auto')
#pyplot.show()
spec, spec0, spat0 \
= tmtb.project_2d_spectrum(test_img, pixelscale, wave0, dispscale, field_coo=numpy.array([-3,0.5]))
print(spec0, spat0, spec.shape)
| [
"numpy.array",
"numpy.zeros",
"enyo.etc.spectrographs.TMTWFOSBlueOpticalModel"
] | [((88, 127), 'enyo.etc.spectrographs.TMTWFOSBlueOpticalModel', 'spectrographs.TMTWFOSBlueOpticalModel', ([], {}), '()\n', (125, 127), False, 'from enyo.etc import spectrographs\n'), ((140, 175), 'numpy.zeros', 'numpy.zeros', (['(100, 50)'], {'dtype': 'float'}), '((100, 50), dtype=float)\n', (151, 175), False, 'import numpy\n'), ((520, 542), 'numpy.array', 'numpy.array', (['[-3, 0.5]'], {}), '([-3, 0.5])\n', (531, 542), False, 'import numpy\n')] |
import unittest
import numpy
import pytest
import six
import chainer
from chainer import initializers
from chainer import testing
from chainer import utils
import chainerx
# Utilities for contiguousness tests.
#
# These tests checks incoming array contiguousness.
# As it's not possible to assume contiguousness of incoming arrays consistently
# (because gradient_check passes contiguous arrays in numerical_grad),
# we instead simulate the test failure. The function implementation raises an
# error if an incoming array matches the expected contiguousness and we expect
# the failure.
class _ContiguousnessMatched(Exception):
pass
def _is_f_contiguous(shape, strides, itemsize):
if numpy.prod(shape) <= 1:
return True
for sh, st in zip(shape, reversed(strides)):
if sh == 1:
continue
if st != itemsize:
return False
itemsize *= sh
return True
def _get_contiguousness(arr):
if isinstance(arr, chainerx.ndarray):
c_contig = arr.is_contiguous
f_contig = _is_f_contiguous(
arr.shape, arr.strides, arr.itemsize)
return (c_contig, f_contig)
return (arr.flags.c_contiguous, arr.flags.f_contiguous)
def _check_contiguousness(arr, expected_contiguous):
if isinstance(arr, chainer.Variable):
_check_contiguousness(arr.array, expected_contiguous)
return
c_contig, f_contig = _get_contiguousness(arr)
if numpy.prod(arr.shape) <= 1:
return # not applicable for this shape
if expected_contiguous is None:
# expected to be non-contiguous
if not c_contig and not f_contig:
raise _ContiguousnessMatched()
elif expected_contiguous == 'C':
# expected to be C-contiguous
if c_contig:
raise _ContiguousnessMatched()
else:
assert False
def _check_grad(grad, expect_grad_none, class_or_tuple):
if expect_grad_none:
assert grad is None
else:
isinstance(grad, class_or_tuple)
def _check_grads(grads, expect_grads_none, class_or_tuple):
    """Apply :func:`_check_grad` to each gradient/expectation pair."""
    pairs = six.moves.zip(grads, expect_grads_none)
    for grad, expect_none in pairs:
        _check_grad(grad, expect_none, class_or_tuple)
# Class decorator that multiplies every test case over all backend
# configurations (CPU with/without iDeep, two CUDA devices, and ChainerX
# native/CUDA devices).  The first argument (``None``) means no extra
# parameterization axes beyond the backend list.
_inject_backend_tests = testing.inject_backend_tests(
    None,
    [
        # CPU tests
        {},
        {'use_ideep': 'always'},
        # GPU tests
        {'use_cuda': True},
        {'use_cuda': True, 'cuda_device': 1},
        # ChainerX tests
        {'use_chainerx': True, 'chainerx_device': 'native:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:1'},
    ])
def _forward_correct(x1, x2):
    """Reference forward: ``y1 = (x1 + x2)**2`` and ``y2 = x1**2 * x2**2``.

    The scalar exponent is cast through the input dtype so the outputs
    keep that dtype.
    """
    two = x1.dtype.type(2)
    first = (x1 + x2) ** two
    second = (x1 ** two) * (x2 ** two)
    return utils.force_array(first), utils.force_array(second)
def _backward_correct(x1, x2, gy1, gy2):
dt = x1.dtype.type
ggx1 = (
+ gy1 * dt(2) * (x1 + x2)
+ gy2 * dt(2) * x1 * x2 ** dt(2))
ggx2 = (
+ gy1 * dt(2) * (x1 + x2)
+ gy2 * dt(2) * x1 ** dt(2) * x2)
return ggx1, ggx2
def _double_backward_correct(x1, x2, gy1, gy2, ggx1, ggx2):
dt = x1.dtype.type
ggy1 = (ggx1 + ggx2) * dt(2) * (x1 + x2)
ggy2 = (ggx1 * x2 + ggx2 * x1) * dt(2) * x1 * x2
gx1 = (
+ ggx1 * (dt(2) * gy1 + dt(2) * x2 ** dt(2) * gy2)
+ ggx2 * (dt(2) * gy1 + dt(4) * x1 * x2 * gy2))
gx2 = (
+ ggx1 * (dt(2) * gy1 + dt(4) * x1 * x2 * gy2)
+ ggx2 * (dt(2) * gy1 + dt(2) * x1 ** dt(2) * gy2))
return gx1, gx2, ggy1, ggy2
# TestFunctionTestSuccessful
#
# This test checks for successful case.
# Incoming array types are also checked.
class FuncCorrectlyImplemented(chainer.FunctionNode):
    """Correct implementation of the reference function.

    Besides computing :func:`_forward_correct`, it asserts that the
    incoming array types match what the backend device is expected to
    supply.  ``expect_grad_outputs_none`` / ``expect_grad_grad_inputs_none``
    tell the gradient checks which grads are expected to be ``None``.
    """
    def __init__(
            self, device,
            expect_grad_outputs_none=(False, False),
            expect_grad_grad_inputs_none=(False, False)):
        self.device = device
        self.expect_grad_outputs_none = expect_grad_outputs_none
        self.expect_grad_grad_inputs_none = expect_grad_grad_inputs_none
    def forward(self, inputs):
        device = self.device
        x1, x2 = inputs
        if device.xp is chainerx:
            # On ChainerX this op falls back; raw arrays must come from
            # the fallback device.
            fallback_device = device.fallback_device
            assert isinstance(x1, fallback_device.supported_array_types)
            assert isinstance(x2, fallback_device.supported_array_types)
        self.retain_inputs((0, 1))
        y1, y2 = _forward_correct(x1, x2)
        return utils.force_array(y1), utils.force_array(y2)
    def backward(self, indexes, grad_outputs):
        device = self.device
        _check_grads(
            grad_outputs, self.expect_grad_outputs_none,
            device.supported_array_types)
        x1, x2 = self.get_retained_inputs()
        gy1, gy2 = grad_outputs
        assert isinstance(x1.array, device.supported_array_types)
        assert isinstance(x2.array, device.supported_array_types)
        # Delegate to a dedicated FunctionNode so that double backward of
        # this function is also exercised.
        grad_func = FuncGradCorrectlyImplemented(
            device,
            self.expect_grad_outputs_none,
            self.expect_grad_grad_inputs_none)
        return grad_func.apply((x1, x2, gy1, gy2))
class FuncGradCorrectlyImplemented(chainer.FunctionNode):
    """Gradient of ``FuncCorrectlyImplemented`` as its own FunctionNode.

    Takes ``(x1, x2, gy1, gy2)`` and returns ``(ggx1, ggx2)``; its own
    ``backward`` implements the double backward of the outer function.
    """
    def __init__(
            self, device,
            expect_grad_outputs_none,
            expect_grad_grad_inputs_none):
        self.device = device
        self.expect_grad_outputs_none = expect_grad_outputs_none
        self.expect_grad_grad_inputs_none = expect_grad_grad_inputs_none
    def forward(self, inputs_and_grad_outputs):
        device = self.device
        x1, x2, gy1, gy2 = inputs_and_grad_outputs
        if device.xp is chainerx:
            # On ChainerX the raw arrays come from the fallback device.
            fallback_device = device.fallback_device
            _check_grads(
                (gy1, gy2), self.expect_grad_outputs_none,
                fallback_device.supported_array_types)
        self.retain_inputs((0, 1, 2, 3))
        # Expected-None grads are substituted with 0 in the formula.
        ggx1, ggx2 = _backward_correct(
            x1, x2,
            0 if self.expect_grad_outputs_none[0] else gy1,
            0 if self.expect_grad_outputs_none[1] else gy2)
        return utils.force_array(ggx1), utils.force_array(ggx2)
    def backward(self, indexes, grad_grad_inputs):
        device = self.device
        _check_grads(
            grad_grad_inputs, self.expect_grad_grad_inputs_none,
            chainer.Variable)
        ggx1, ggx2 = grad_grad_inputs
        x1, x2, gy1, gy2 = self.get_retained_inputs()
        assert isinstance(x1, chainer.Variable)
        assert isinstance(x2, chainer.Variable)
        assert isinstance(x1.array, device.supported_array_types)
        assert isinstance(x2.array, device.supported_array_types)
        _check_grads(
            (gy1, gy2), self.expect_grad_outputs_none, chainer.Variable)
        if not self.expect_grad_outputs_none[0]:
            # NOTE(review): isinstance result discarded here and below —
            # presumably these should be asserted like the x1/x2 checks
            # above; confirm before tightening.
            isinstance(gy1.array, device.supported_array_types)
        if not self.expect_grad_outputs_none[1]:
            isinstance(gy2.array, device.supported_array_types)
        gx1, gx2, ggy1, ggy2 = _double_backward_correct(
            x1, x2,
            0 if self.expect_grad_outputs_none[0] else gy1,
            0 if self.expect_grad_outputs_none[1] else gy2,
            0 if self.expect_grad_grad_inputs_none[0] else ggx1,
            0 if self.expect_grad_grad_inputs_none[1] else ggx2)
        return gx1, gx2, ggy1, ggy2
@testing.parameterize(*testing.product({
    'shape': [(3, 2), (2,), (1,), (), (2, 0, 3)],
}))
@_inject_backend_tests
class TestFunctionTestSuccessful(testing.FunctionTestCase):
    """FunctionTestCase passes on a correctly implemented function,
    including scalar and zero-sized input shapes."""
    def generate_inputs(self):
        x1 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
        x2 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
        return x1, x2
    def forward(self, inputs, device):
        func = FuncCorrectlyImplemented(device)
        return func.apply(inputs)
    def forward_expected(self, inputs):
        return _forward_correct(*inputs)
@_inject_backend_tests
class TestFunctionTestSuccessfulNoneGrads(testing.FunctionTestCase):
    """Successful case where one grad_output and one grad_grad_input are
    ``None``; the function under test expects exactly that pattern."""
    def generate_inputs(self):
        x1 = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
        x2 = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
        return x1, x2
    def generate_grad_outputs(self, output_templates):
        # First upstream gradient is deliberately None.
        grad_outputs = (
            None,
            (numpy.random.uniform(-1, 1, output_templates[1].shape)
             .astype(output_templates[1].dtype)))
        return grad_outputs
    def generate_grad_grad_inputs(self, input_templates):
        # Second grad-grad input is deliberately None.
        grad_inputs = (
            (numpy.random.uniform(-1, 1, input_templates[0].shape)
             .astype(input_templates[0].dtype)),
            None)
        return grad_inputs
    def forward(self, inputs, device):
        func = FuncCorrectlyImplemented(
            device,
            expect_grad_outputs_none=(True, False),
            expect_grad_grad_inputs_none=(False, True))
        return func.apply(inputs)
    def forward_expected(self, inputs):
        return _forward_correct(*inputs)
# TestFunctionTestIncorrectForward
#
# This test checks if it can detect incorrect forward implementation.
class FuncWithIncorrectForward(chainer.FunctionNode):
    """Deliberately broken forward: the second output is off by one."""
    def forward(self, inputs):
        x1, x2 = inputs
        y1, y2 = _forward_correct(x1, x2)
        y1, y2 = utils.force_array(y1), utils.force_array(y2)
        y2[...] += 1  # ! make incorrect
        return y1, y2
    def backward(self, *args, **kwargs):
        assert False  # should never be called
@testing.parameterize(*testing.product({
    'shape': [(3, 2), (2,), (1,), ()],
}))
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=testing.FunctionTestError)
class TestFunctionTestIncorrectForward(testing.FunctionTestCase):
    """FunctionTestCase must detect the broken forward (strict xfail)."""
    skip_backward_test = True
    skip_double_backward_test = True
    def generate_inputs(self):
        x1 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
        x2 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
        return x1, x2
    def forward(self, inputs, device):
        func = FuncWithIncorrectForward()
        return func.apply(inputs)
    def forward_expected(self, inputs):
        return _forward_correct(*inputs)
# TestFunctionTestIncorrectBackward
#
# This test checks if it can detect incorrect backward implementation.
class FuncWithIncorrectBackward(chainer.FunctionNode):
    """Correct forward but deliberately wrong first-order gradients."""
    def __init__(self, expect_grad_outputs_none=(False, False)):
        self.expect_grad_outputs_none = expect_grad_outputs_none
    def forward(self, inputs):
        x1, x2 = inputs
        y1, y2 = _forward_correct(x1, x2)
        self.retain_inputs((0, 1))
        return utils.force_array(y1), utils.force_array(y2)
    def backward(self, indexes, grad_outputs):
        gy1, gy2 = grad_outputs
        x1, x2 = self.get_retained_inputs()
        ggx1, ggx2 = _backward_correct(
            x1, x2,
            0 if self.expect_grad_outputs_none[0] else gy1,
            0 if self.expect_grad_outputs_none[1] else gy2)
        ggx1 = ggx1 + 100000  # ! make incorrect
        ggx2 = ggx2 + 10000  # ! make incorrect
        return utils.force_array(ggx1), utils.force_array(ggx2)
@testing.parameterize(*testing.product({
    'shape': [(3, 2), (2,), (1,), ()],
}))
@testing.fix_random()
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=testing.FunctionTestError)
class TestFunctionTestIncorrectBackward(testing.FunctionTestCase):
    """FunctionTestCase must detect the broken backward (strict xfail)."""
    skip_forward_test = True
    skip_double_backward_test = True
    def generate_inputs(self):
        x1 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
        x2 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
        return x1, x2
    def forward(self, inputs, device):
        func = FuncWithIncorrectBackward()
        return func.apply(inputs)
    def forward_expected(self, inputs):
        return _forward_correct(*inputs)
@testing.fix_random()
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=testing.FunctionTestError)
class TestFunctionTestIncorrectBackwardNoneGrads(testing.FunctionTestCase):
    """Broken backward must also be detected when a grad_output is None."""
    skip_forward_test = True
    skip_double_backward_test = True
    def generate_inputs(self):
        x1 = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
        x2 = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
        return x1, x2
    def generate_grad_outputs(self, output_templates):
        # First upstream gradient is deliberately None.
        grad_outputs = (
            None,
            (numpy.random.uniform(-1, 1, output_templates[1].shape)
             .astype(output_templates[1].dtype)))
        return grad_outputs
    def forward(self, inputs, device):
        func = FuncWithIncorrectBackward(
            expect_grad_outputs_none=(True, False))
        return func.apply(inputs)
    def forward_expected(self, inputs):
        return _forward_correct(*inputs)
# TestFunctionTestIncorrectDoubleBackward
#
# This test checks if it can detect incorrect double backward implementation.
class FuncWithIncorrectDoubleBackward(chainer.FunctionNode):
    """Correct forward/backward; the gradient node it delegates to has a
    deliberately wrong double backward."""
    def __init__(
            self,
            expect_grad_outputs_none=(False, False),
            expect_grad_grad_inputs_none=(False, False)):
        self.expect_grad_outputs_none = expect_grad_outputs_none
        self.expect_grad_grad_inputs_none = expect_grad_grad_inputs_none
    def forward(self, inputs):
        x1, x2 = inputs
        y1, y2 = _forward_correct(x1, x2)
        self.retain_inputs((0, 1))
        return utils.force_array(y1), utils.force_array(y2)
    def backward(self, indexes, grad_outputs):
        x1, x2 = self.get_retained_inputs()
        gy1, gy2 = grad_outputs
        grad_func = FuncGradWithIncorrectDoubleBackward(
            expect_grad_outputs_none=self.expect_grad_outputs_none,
            expect_grad_grad_inputs_none=self.expect_grad_grad_inputs_none)
        return grad_func.apply((x1, x2, gy1, gy2))
class FuncGradWithIncorrectDoubleBackward(chainer.FunctionNode):
    """Gradient node whose own backward (the double backward of the outer
    function) corrupts ``ggy2``."""
    def __init__(
            self,
            expect_grad_outputs_none=(False, False),
            expect_grad_grad_inputs_none=(False, False)):
        self.expect_grad_outputs_none = expect_grad_outputs_none
        self.expect_grad_grad_inputs_none = expect_grad_grad_inputs_none
    def forward(self, inputs_and_grad_outputs):
        x1, x2, gy1, gy2 = inputs_and_grad_outputs
        self.retain_inputs((0, 1, 2, 3))
        # Expected-None grads are substituted with 0 in the formula.
        ggx1, ggx2 = _backward_correct(
            x1, x2,
            0 if self.expect_grad_outputs_none[0] else gy1,
            0 if self.expect_grad_outputs_none[1] else gy2)
        return utils.force_array(ggx1), utils.force_array(ggx2)
    def backward(self, indexes, grad_grad_inputs):
        ggx1, ggx2 = grad_grad_inputs
        x1, x2, gy1, gy2 = self.get_retained_inputs()
        gx1, gx2, ggy1, ggy2 = _double_backward_correct(
            x1, x2,
            0 if self.expect_grad_outputs_none[0] else gy1,
            0 if self.expect_grad_outputs_none[1] else gy2,
            0 if self.expect_grad_grad_inputs_none[0] else ggx1,
            0 if self.expect_grad_grad_inputs_none[1] else ggx2)
        ggy2 = ggy2 + 10000  # ! make incorrect
        return gx1, gx2, ggy1, ggy2
@testing.parameterize(*testing.product({
    'shape': [(3, 2), (2,), (1,), ()],
}))
@testing.fix_random()
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=testing.FunctionTestError)
class TestFunctionTestIncorrectDoubleBackward(testing.FunctionTestCase):
    """FunctionTestCase must detect the broken double backward."""
    skip_forward_test = True
    skip_backward_test = True
    def generate_inputs(self):
        x1 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
        x2 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
        return x1, x2
    def forward(self, inputs, device):
        func = FuncWithIncorrectDoubleBackward()
        return func.apply(inputs)
    def forward_expected(self, inputs):
        return _forward_correct(*inputs)
@testing.fix_random()
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=testing.FunctionTestError)
class TestFunctionTestIncorrectDoubleBackwardNoneGrads(
        testing.FunctionTestCase):
    """Broken double backward must also be detected with None grads."""
    skip_forward_test = True
    skip_backward_test = True
    def generate_inputs(self):
        x1 = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
        x2 = numpy.random.uniform(-1, 1, (3, 2)).astype(numpy.float32)
        return x1, x2
    def generate_grad_outputs(self, output_templates):
        # First upstream gradient is deliberately None.
        grad_outputs = (
            None,
            (numpy.random.uniform(-1, 1, output_templates[1].shape)
             .astype(output_templates[1].dtype)))
        return grad_outputs
    def generate_grad_grad_inputs(self, input_templates):
        # Second grad-grad input is deliberately None.
        grad_inputs = (
            (numpy.random.uniform(-1, 1, input_templates[0].shape)
             .astype(input_templates[0].dtype)),
            None)
        return grad_inputs
    def forward(self, inputs, device):
        func = FuncWithIncorrectDoubleBackward(
            expect_grad_outputs_none=(True, False),
            expect_grad_grad_inputs_none=(False, True))
        return func.apply(inputs)
    def forward_expected(self, inputs):
        return _forward_correct(*inputs)
# FunctionTestCaseArrayContiguousnessTest
class FuncWithContiguousnessCheck(chainer.FunctionNode):
    """Function that raises ``_ContiguousnessMatched`` when the array
    probed at ``check_on`` has the contiguousness given by ``contiguous``."""
    def __init__(self, contiguous, check_on):
        self.contiguous = contiguous
        self.check_on = check_on
    def _check_contiguousness(self, arr):
        assert isinstance(arr, chainer.get_array_types())
        _check_contiguousness(arr, self.contiguous)
    def forward(self, inputs):
        x1, x2 = inputs
        if self.check_on == 'forward_input':
            self._check_contiguousness(x1)
            self._check_contiguousness(x2)
        self.retain_inputs((0, 1))
        y1, y2 = _forward_correct(x1, x2)
        return utils.force_array(y1), utils.force_array(y2)
    def backward(self, indexes, grad_outputs):
        x1, x2 = self.get_retained_inputs()
        gy1, gy2 = grad_outputs
        if self.check_on == 'backward_retained_input':
            self._check_contiguousness(x1.array)
            self._check_contiguousness(x2.array)
        elif self.check_on == 'backward_grad_output':
            self._check_contiguousness(gy1.array)
            self._check_contiguousness(gy2.array)
        grad_func = FuncGradWithContiguousnessCheck(
            self.contiguous, self.check_on)
        return grad_func.apply((x1, x2, gy1, gy2))
class FuncGradWithContiguousnessCheck(chainer.FunctionNode):
    """Gradient-node counterpart of ``FuncWithContiguousnessCheck``."""
    def __init__(self, contiguous, check_on):
        self.contiguous = contiguous
        self.check_on = check_on
    def _check_contiguousness(self, arr):
        # NOTE(review): delegates to testing.function_link's helper rather
        # than the module-level one — presumably equivalent; confirm.
        testing.function_link._check_contiguousness(arr, self.contiguous)
    def forward(self, inputs_and_grad_outputs):
        x1, x2, gy1, gy2 = inputs_and_grad_outputs
        self.retain_inputs((0, 1, 2, 3))
        ggx1, ggx2 = _backward_correct(x1, x2, gy1, gy2)
        return utils.force_array(ggx1), utils.force_array(ggx2)
    def backward(self, indexes, grad_grad_inputs):
        ggx1, ggx2 = grad_grad_inputs
        if self.check_on == 'double_backward_grad_grad_input':
            self._check_contiguousness(ggx1)
            self._check_contiguousness(ggx2)
        x1, x2, gy1, gy2 = self.get_retained_inputs()
        gx1, gx2, ggy1, ggy2 = _double_backward_correct(
            x1, x2, gy1, gy2, ggx1, ggx2)
        return gx1, gx2, ggy1, ggy2
@testing.parameterize(*testing.product({
    'shape': [(3, 2), (2,), (1, 2)],
    'contiguous': [None, 'C'],
    'check_on': [  # Check points in which contiguousness is probed.
        'forward_input',
        # TODO(niboshi): As gradient_check.check_backward currently copies the
        # grads without preserving strides, they cannot be non-contiguous.
        # Enable this check after check_backward will be fixed.
        # 'backward_grad_output',
        'backward_retained_input',
        # TODO(niboshi): Enable this check after check_backward will be fixed.
        # 'double_backward_grad_grad_input',
    ]}))
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=_ContiguousnessMatched)
class FunctionTestCaseArrayContiguousnessTest(testing.FunctionTestCase):
    """Checks that FunctionTestCase feeds arrays of the requested
    contiguousness to each probe point (strict xfail on the probe)."""
    def generate_inputs(self):
        x1 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
        x2 = numpy.random.uniform(-1, 1, self.shape).astype(numpy.float32)
        return x1, x2
    def forward(self, inputs, device):
        func = FuncWithContiguousnessCheck(self.contiguous, self.check_on)
        return func.apply(inputs)
    def forward_expected(self, inputs):
        return _forward_correct(*inputs)
    def before_test(self, test_name):
        # Some combinations of test methods and check points are irrelevant.
        # Skip such combinations.
        # For example, `test_forward` method does not generate grad_outputs.
        if test_name == 'test_forward':
            if self.check_on != 'forward_input':
                raise unittest.SkipTest()
        if test_name == 'test_backward':
            if self.check_on == 'double_backward_grad_grad_input':
                raise unittest.SkipTest()
class Dot(chainer.FunctionNode):
    """Matrix product ``y = x . p`` with switchable, deliberate defects.

    The ``incorrect_*`` flags corrupt the forward output or one of the
    gradients; ``contiguous`` / ``check_on`` enable contiguousness probes.
    """
    def __init__(
            self, incorrect_forward=False, incorrect_backward_gx=False,
            incorrect_backward_gp=False, contiguous=None,
            check_on=None):
        self.incorrect_forward = incorrect_forward
        self.incorrect_backward_gx = incorrect_backward_gx
        self.incorrect_backward_gp = incorrect_backward_gp
        self.contiguous = contiguous
        self.check_on = check_on
    def forward(self, inputs):
        self.retain_inputs((0, 1))
        xp = chainer.backend.get_array_module(*inputs)
        x, p = inputs
        if self.check_on == 'forward_input':
            self._check_contiguousness(x)
            self._check_contiguousness(p)
        y = xp.dot(x, p)
        if self.incorrect_forward:
            y *= 9999  # ! make incorrect
        return y,
    def backward(self, indexes, grad_outputs):
        gy, = grad_outputs
        x, p = self.get_retained_inputs()
        if self.check_on == 'backward_retained_input':
            self._check_contiguousness(x.array)
            self._check_contiguousness(p.array)
        elif self.check_on == 'backward_grad_output':
            self._check_contiguousness(gy.array)
        gx = chainer.functions.matmul(gy, p.T)
        gp = chainer.functions.matmul(x.T, gy)
        if self.incorrect_backward_gx:
            gx /= 2  # ! make incorrect
        if self.incorrect_backward_gp:
            gp += 1000  # ! make incorrect
        return gx, gp
    def _check_contiguousness(self, arr):
        assert isinstance(arr, chainer.get_array_types())
        _check_contiguousness(arr, self.contiguous)
class DotLink(chainer.Link):
    """Correctly implemented dot link: ``y = x . p``."""
    def __init__(
            self, in_size, out_size, initial_p=None, contiguous=None,
            check_on=None):
        super(DotLink, self).__init__()
        with self.init_scope():
            if initial_p is None:
                # Default: parameter filled with ones.
                initial_p = initializers.Constant(1)
            self.p = chainer.Parameter(initial_p, shape=(in_size, out_size))
        self.contiguous = contiguous
        self.check_on = check_on
    def forward(self, inputs):
        x = inputs
        p = self.p
        contiguous = self.contiguous
        check_on = self.check_on
        y, = Dot(contiguous=contiguous, check_on=check_on).apply((x, p))
        return y
class DotLinkIncorrectForward(DotLink):
    """Incorrectly implemented dot (forward)."""
    def __init__(self, *args, **kwargs):
        # Construction is identical to DotLink; kept for explicitness.
        super(DotLinkIncorrectForward, self).__init__(*args, **kwargs)
    def forward(self, inputs):
        x = inputs
        p = self.p
        y, = Dot(incorrect_forward=True).apply((x, p))
        return y
class DotLinkIncorrectBackward(DotLink):
    """Incorrect implementation of dot (backward)."""
    def __init__(self, incorrect_gx, incorrect_gp, *args, **kwargs):
        super(DotLinkIncorrectBackward, self).__init__(*args, **kwargs)
        # Which of the two gradients (input / parameter) to corrupt.
        self.incorrect_gx = incorrect_gx
        self.incorrect_gp = incorrect_gp
    def forward(self, inputs):
        x = inputs
        p = self.p
        y, = Dot(
            incorrect_backward_gx=self.incorrect_gx,
            incorrect_backward_gp=self.incorrect_gp).apply((x, p))
        return y
class DotLinkIncorrectInitialization(DotLink):
    """Incorrect implementation of dot (parameter initialization)."""
    def __init__(self, in_size, out_size, initial_p=None):
        # Ignores given initializer here.
        super(DotLinkIncorrectInitialization, self).__init__(
            in_size, out_size, initializers.Constant(0))
class DotLinkTestBase(object):
    """Shared fixture mixin for the DotLink LinkTestCase variants below."""
    param_names = ('p',)
    def setUp(self):
        # Batch size, input width, and output width of the dot product.
        self.n = 1
        self.in_size = 2
        self.out_size = 3
        self.dtype = numpy.float32
    def generate_params(self):
        in_size = self.in_size
        out_size = self.out_size
        return numpy.random.uniform(
            -1, 1, (in_size, out_size)).astype(self.dtype),
    def create_link(self, initializers):
        initial_p, = initializers
        in_size = self.in_size
        out_size = self.out_size
        return DotLink(in_size, out_size, initial_p)
    def generate_inputs(self):
        return numpy.random.rand(self.n, self.in_size).astype(self.dtype),
    # Required for forward backward tests.
    def forward_expected(self, link, inputs):
        p = link.p.array
        x, = inputs
        return numpy.dot(x, p),
    # Required for initializers test.
    def get_initializers(self):
        return [
            initializers.Constant(0), 2,
            testing.InitializerArgument(None, initializers.Constant(1))],
@_inject_backend_tests
class TestLinkCorrect(DotLinkTestBase, testing.LinkTestCase):
    """LinkTestCase passes on the correctly implemented DotLink."""
    pass
@_inject_backend_tests
class TestLinkInitializersCorrect(
        DotLinkTestBase, testing.LinkInitializersTestCase):
    """Initializer test passes on the correctly implemented DotLink."""
    pass
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=testing.LinkTestError)
class TestLinkIncorrectForward(DotLinkTestBase, testing.LinkTestCase):
    """LinkTestCase must detect the broken forward (strict xfail)."""
    skip_backward_test = True
    def create_link(self, initializers):
        initial_p, = initializers
        in_size = self.in_size
        out_size = self.out_size
        link = DotLinkIncorrectForward(in_size, out_size, initial_p)
        return link
@testing.fix_random()
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=testing.LinkTestError)
class TestLinkIncorrectBackwardInput(DotLinkTestBase, testing.LinkTestCase):
    """LinkTestCase must detect a wrong input gradient (gx corrupted)."""
    skip_forward_test = True
    def create_link(self, initializers):
        initial_p, = initializers
        in_size = self.in_size
        out_size = self.out_size
        link = DotLinkIncorrectBackward(
            True, False, in_size, out_size, initial_p)
        return link
@testing.fix_random()
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=testing.LinkTestError)
class TestLinkIncorrectBackwardParam(DotLinkTestBase, testing.LinkTestCase):
    """LinkTestCase must detect a wrong parameter gradient (gp corrupted)."""
    skip_forward_test = True
    def create_link(self, initializers):
        initial_p, = initializers
        in_size = self.in_size
        out_size = self.out_size
        link = DotLinkIncorrectBackward(
            False, True, in_size, out_size, initial_p)
        return link
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=TypeError)
class TestLinkIncorrectCreateLink(DotLinkTestBase, testing.LinkTestCase):
    """create_link returning a non-Link must be rejected with TypeError."""
    def create_link(self, initializers):
        # Invalid return type (that is not an instance of chainer.Link).
        return numpy.array([1])
@testing.parameterize(*testing.product({
    'invalid_forward_backward_initializer': [
        chainer.Variable(numpy.array([1])),
        chainer.Parameter(numpy.array([1])),
    ]}))
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=TypeError)
class TestLinkIncorrectForwardBackwardInitializers(
        DotLinkTestBase, testing.LinkTestCase):
    """Variables/Parameters are invalid values for generate_params."""
    def generate_params(self):
        return self.invalid_forward_backward_initializer,
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=testing.LinkTestError)
class TestLinkIncorrectBackwardInitializers(
        DotLinkTestBase, testing.LinkInitializersTestCase):
    """Initializer test must detect a link that ignores its initializer."""
    def create_link(self, initializers):
        initial_p, = initializers
        in_size = self.in_size
        out_size = self.out_size
        link = DotLinkIncorrectInitialization(in_size, out_size, initial_p)
        return link
@testing.parameterize(*testing.product({
    'invalid_initializer': [
        chainer.Variable(numpy.array([1])),
        chainer.Parameter(numpy.array([1])),
    ]}))
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=TypeError)
class TestLinkIncorrectInitializers(
        DotLinkTestBase, testing.LinkInitializersTestCase):
    """Variables/Parameters are invalid entries for get_initializers."""
    def get_initializers(self):
        return [self.invalid_initializer],
@testing.parameterize(*testing.product({
    'contiguous': [None, 'C'],
    'check_on': [  # Check points in which contiguousness is probed.
        'forward_input',
        # TODO(hvy): As gradient_check.check_backward currently copies the
        # grads without preserving strides, they cannot be non-contiguous.
        # Enable this check after check_backward will be fixed.
        # 'backward_grad_output',
        'backward_retained_input',
        # TODO(hvy): Enable this check after check_backward will be fixed.
        # 'double_backward_grad_grad_input',
    ]}))
@_inject_backend_tests
@pytest.mark.xfail(strict=True, raises=_ContiguousnessMatched)
class TestLinkContiguousness(DotLinkTestBase, testing.LinkTestCase):
    """Checks that LinkTestCase feeds arrays of the requested
    contiguousness to each probe point (strict xfail on the probe)."""
    def before_test(self, test_name):
        # Some combinations of test methods and check points are irrelevant.
        # Skip such combinations.
        # For example, `test_forward` method does not generate grad_outputs.
        if test_name == 'test_forward':
            if self.check_on != 'forward_input':
                raise unittest.SkipTest()
    def create_link(self, initializers):
        initial_p, = initializers
        in_size = self.in_size
        out_size = self.out_size
        contiguous = self.contiguous
        check_on = self.check_on
        link = DotLink(
            in_size, out_size, initial_p, contiguous=contiguous,
            check_on=check_on)
        return link
# Discover and run this module's tests when executed directly.
testing.run_module(__name__, __file__)
| [
"chainer.utils.force_array",
"chainer.Parameter",
"chainer.initializers.Constant",
"six.moves.zip",
"chainer.testing.run_module",
"numpy.prod",
"chainer.testing.function_link._check_contiguousness",
"chainer.testing.inject_backend_tests",
"chainer.testing.fix_random",
"chainer.backend.get_array_mo... | [((2244, 2547), 'chainer.testing.inject_backend_tests', 'testing.inject_backend_tests', (['None', "[{}, {'use_ideep': 'always'}, {'use_cuda': True}, {'use_cuda': True,\n 'cuda_device': 1}, {'use_chainerx': True, 'chainerx_device': 'native:0'\n }, {'use_chainerx': True, 'chainerx_device': 'cuda:0'}, {'use_chainerx':\n True, 'chainerx_device': 'cuda:1'}]"], {}), "(None, [{}, {'use_ideep': 'always'}, {\n 'use_cuda': True}, {'use_cuda': True, 'cuda_device': 1}, {\n 'use_chainerx': True, 'chainerx_device': 'native:0'}, {'use_chainerx': \n True, 'chainerx_device': 'cuda:0'}, {'use_chainerx': True,\n 'chainerx_device': 'cuda:1'}])\n", (2272, 2547), False, 'from chainer import testing\n'), ((9622, 9686), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'strict': '(True)', 'raises': 'testing.FunctionTestError'}), '(strict=True, raises=testing.FunctionTestError)\n', (9639, 9686), False, 'import pytest\n'), ((11244, 11264), 'chainer.testing.fix_random', 'testing.fix_random', ([], {}), '()\n', (11262, 11264), False, 'from chainer import testing\n'), ((11289, 11353), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'strict': '(True)', 'raises': 'testing.FunctionTestError'}), '(strict=True, raises=testing.FunctionTestError)\n', (11306, 11353), False, 'import pytest\n'), ((11893, 11913), 'chainer.testing.fix_random', 'testing.fix_random', ([], {}), '()\n', (11911, 11913), False, 'from chainer import testing\n'), ((11938, 12002), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'strict': '(True)', 'raises': 'testing.FunctionTestError'}), '(strict=True, raises=testing.FunctionTestError)\n', (11955, 12002), False, 'import pytest\n'), ((15256, 15276), 'chainer.testing.fix_random', 'testing.fix_random', ([], {}), '()\n', (15274, 15276), False, 'from chainer import testing\n'), ((15301, 15365), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'strict': '(True)', 'raises': 'testing.FunctionTestError'}), '(strict=True, raises=testing.FunctionTestError)\n', 
(15318, 15365), False, 'import pytest\n'), ((15910, 15930), 'chainer.testing.fix_random', 'testing.fix_random', ([], {}), '()\n', (15928, 15930), False, 'from chainer import testing\n'), ((15955, 16019), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'strict': '(True)', 'raises': 'testing.FunctionTestError'}), '(strict=True, raises=testing.FunctionTestError)\n', (15972, 16019), False, 'import pytest\n'), ((20085, 20146), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'strict': '(True)', 'raises': '_ContiguousnessMatched'}), '(strict=True, raises=_ContiguousnessMatched)\n', (20102, 20146), False, 'import pytest\n'), ((26016, 26076), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'strict': '(True)', 'raises': 'testing.LinkTestError'}), '(strict=True, raises=testing.LinkTestError)\n', (26033, 26076), False, 'import pytest\n'), ((26411, 26431), 'chainer.testing.fix_random', 'testing.fix_random', ([], {}), '()\n', (26429, 26431), False, 'from chainer import testing\n'), ((26456, 26516), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'strict': '(True)', 'raises': 'testing.LinkTestError'}), '(strict=True, raises=testing.LinkTestError)\n', (26473, 26516), False, 'import pytest\n'), ((26883, 26903), 'chainer.testing.fix_random', 'testing.fix_random', ([], {}), '()\n', (26901, 26903), False, 'from chainer import testing\n'), ((26928, 26988), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'strict': '(True)', 'raises': 'testing.LinkTestError'}), '(strict=True, raises=testing.LinkTestError)\n', (26945, 26988), False, 'import pytest\n'), ((27378, 27426), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'strict': '(True)', 'raises': 'TypeError'}), '(strict=True, raises=TypeError)\n', (27395, 27426), False, 'import pytest\n'), ((27859, 27907), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'strict': '(True)', 'raises': 'TypeError'}), '(strict=True, raises=TypeError)\n', (27876, 27907), False, 'import pytest\n'), ((28124, 28184), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], 
{'strict': '(True)', 'raises': 'testing.LinkTestError'}), '(strict=True, raises=testing.LinkTestError)\n', (28141, 28184), False, 'import pytest\n'), ((28720, 28768), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'strict': '(True)', 'raises': 'TypeError'}), '(strict=True, raises=TypeError)\n', (28737, 28768), False, 'import pytest\n'), ((29546, 29607), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'strict': '(True)', 'raises': '_ContiguousnessMatched'}), '(strict=True, raises=_ContiguousnessMatched)\n', (29563, 29607), False, 'import pytest\n'), ((30387, 30425), 'chainer.testing.run_module', 'testing.run_module', (['__name__', '__file__'], {}), '(__name__, __file__)\n', (30405, 30425), False, 'from chainer import testing\n'), ((2117, 2156), 'six.moves.zip', 'six.moves.zip', (['grads', 'expect_grads_none'], {}), '(grads, expect_grads_none)\n', (2130, 2156), False, 'import six\n'), ((699, 716), 'numpy.prod', 'numpy.prod', (['shape'], {}), '(shape)\n', (709, 716), False, 'import numpy\n'), ((1450, 1471), 'numpy.prod', 'numpy.prod', (['arr.shape'], {}), '(arr.shape)\n', (1460, 1471), False, 'import numpy\n'), ((2799, 2820), 'chainer.utils.force_array', 'utils.force_array', (['y1'], {}), '(y1)\n', (2816, 2820), False, 'from chainer import utils\n'), ((2822, 2843), 'chainer.utils.force_array', 'utils.force_array', (['y2'], {}), '(y2)\n', (2839, 2843), False, 'from chainer import utils\n'), ((7370, 7433), 'chainer.testing.product', 'testing.product', (["{'shape': [(3, 2), (2,), (1,), (), (2, 0, 3)]}"], {}), "({'shape': [(3, 2), (2,), (1,), (), (2, 0, 3)]})\n", (7385, 7433), False, 'from chainer import testing\n'), ((9537, 9589), 'chainer.testing.product', 'testing.product', (["{'shape': [(3, 2), (2,), (1,), ()]}"], {}), "({'shape': [(3, 2), (2,), (1,), ()]})\n", (9552, 9589), False, 'from chainer import testing\n'), ((11182, 11234), 'chainer.testing.product', 'testing.product', (["{'shape': [(3, 2), (2,), (1,), ()]}"], {}), "({'shape': [(3, 2), (2,), (1,), ()]})\n", 
(11197, 11234), False, 'from chainer import testing\n'), ((15194, 15246), 'chainer.testing.product', 'testing.product', (["{'shape': [(3, 2), (2,), (1,), ()]}"], {}), "({'shape': [(3, 2), (2,), (1,), ()]})\n", (15209, 15246), False, 'from chainer import testing\n'), ((18674, 18739), 'chainer.testing.function_link._check_contiguousness', 'testing.function_link._check_contiguousness', (['arr', 'self.contiguous'], {}), '(arr, self.contiguous)\n', (18717, 18739), False, 'from chainer import testing\n'), ((19461, 19600), 'chainer.testing.product', 'testing.product', (["{'shape': [(3, 2), (2,), (1, 2)], 'contiguous': [None, 'C'], 'check_on': [\n 'forward_input', 'backward_retained_input']}"], {}), "({'shape': [(3, 2), (2,), (1, 2)], 'contiguous': [None, 'C'],\n 'check_on': ['forward_input', 'backward_retained_input']})\n", (19476, 19600), False, 'from chainer import testing\n'), ((21693, 21734), 'chainer.backend.get_array_module', 'chainer.backend.get_array_module', (['*inputs'], {}), '(*inputs)\n', (21725, 21734), False, 'import chainer\n'), ((22376, 22409), 'chainer.functions.matmul', 'chainer.functions.matmul', (['gy', 'p.T'], {}), '(gy, p.T)\n', (22400, 22409), False, 'import chainer\n'), ((22423, 22456), 'chainer.functions.matmul', 'chainer.functions.matmul', (['x.T', 'gy'], {}), '(x.T, gy)\n', (22447, 22456), False, 'import chainer\n'), ((27631, 27647), 'numpy.array', 'numpy.array', (['[1]'], {}), '([1])\n', (27642, 27647), False, 'import numpy\n'), ((28967, 29073), 'chainer.testing.product', 'testing.product', (["{'contiguous': [None, 'C'], 'check_on': ['forward_input',\n 'backward_retained_input']}"], {}), "({'contiguous': [None, 'C'], 'check_on': ['forward_input',\n 'backward_retained_input']})\n", (28982, 29073), False, 'from chainer import testing\n'), ((4481, 4502), 'chainer.utils.force_array', 'utils.force_array', (['y1'], {}), '(y1)\n', (4498, 4502), False, 'from chainer import utils\n'), ((4504, 4525), 'chainer.utils.force_array', 'utils.force_array', 
(['y2'], {}), '(y2)\n', (4521, 4525), False, 'from chainer import utils\n'), ((6092, 6115), 'chainer.utils.force_array', 'utils.force_array', (['ggx1'], {}), '(ggx1)\n', (6109, 6115), False, 'from chainer import utils\n'), ((6117, 6140), 'chainer.utils.force_array', 'utils.force_array', (['ggx2'], {}), '(ggx2)\n', (6134, 6140), False, 'from chainer import utils\n'), ((9315, 9336), 'chainer.utils.force_array', 'utils.force_array', (['y1'], {}), '(y1)\n', (9332, 9336), False, 'from chainer import utils\n'), ((9338, 9359), 'chainer.utils.force_array', 'utils.force_array', (['y2'], {}), '(y2)\n', (9355, 9359), False, 'from chainer import utils\n'), ((10667, 10688), 'chainer.utils.force_array', 'utils.force_array', (['y1'], {}), '(y1)\n', (10684, 10688), False, 'from chainer import utils\n'), ((10690, 10711), 'chainer.utils.force_array', 'utils.force_array', (['y2'], {}), '(y2)\n', (10707, 10711), False, 'from chainer import utils\n'), ((11108, 11131), 'chainer.utils.force_array', 'utils.force_array', (['ggx1'], {}), '(ggx1)\n', (11125, 11131), False, 'from chainer import utils\n'), ((11133, 11156), 'chainer.utils.force_array', 'utils.force_array', (['ggx2'], {}), '(ggx2)\n', (11150, 11156), False, 'from chainer import utils\n'), ((13455, 13476), 'chainer.utils.force_array', 'utils.force_array', (['y1'], {}), '(y1)\n', (13472, 13476), False, 'from chainer import utils\n'), ((13478, 13499), 'chainer.utils.force_array', 'utils.force_array', (['y2'], {}), '(y2)\n', (13495, 13499), False, 'from chainer import utils\n'), ((14565, 14588), 'chainer.utils.force_array', 'utils.force_array', (['ggx1'], {}), '(ggx1)\n', (14582, 14588), False, 'from chainer import utils\n'), ((14590, 14613), 'chainer.utils.force_array', 'utils.force_array', (['ggx2'], {}), '(ggx2)\n', (14607, 14613), False, 'from chainer import utils\n'), ((17460, 17485), 'chainer.get_array_types', 'chainer.get_array_types', ([], {}), '()\n', (17483, 17485), False, 'import chainer\n'), ((17819, 17840), 
'chainer.utils.force_array', 'utils.force_array', (['y1'], {}), '(y1)\n', (17836, 17840), False, 'from chainer import utils\n'), ((17842, 17863), 'chainer.utils.force_array', 'utils.force_array', (['y2'], {}), '(y2)\n', (17859, 17863), False, 'from chainer import utils\n'), ((18954, 18977), 'chainer.utils.force_array', 'utils.force_array', (['ggx1'], {}), '(ggx1)\n', (18971, 18977), False, 'from chainer import utils\n'), ((18979, 19002), 'chainer.utils.force_array', 'utils.force_array', (['ggx2'], {}), '(ggx2)\n', (18996, 19002), False, 'from chainer import utils\n'), ((22676, 22701), 'chainer.get_array_types', 'chainer.get_array_types', ([], {}), '()\n', (22699, 22701), False, 'import chainer\n'), ((23122, 23177), 'chainer.Parameter', 'chainer.Parameter', (['initial_p'], {'shape': '(in_size, out_size)'}), '(initial_p, shape=(in_size, out_size))\n', (23139, 23177), False, 'import chainer\n'), ((24688, 24712), 'chainer.initializers.Constant', 'initializers.Constant', (['(0)'], {}), '(0)\n', (24709, 24712), False, 'from chainer import initializers\n'), ((25543, 25558), 'numpy.dot', 'numpy.dot', (['x', 'p'], {}), '(x, p)\n', (25552, 25558), False, 'import numpy\n'), ((7570, 7609), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'self.shape'], {}), '(-1, 1, self.shape)\n', (7590, 7609), False, 'import numpy\n'), ((7645, 7684), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'self.shape'], {}), '(-1, 1, self.shape)\n', (7665, 7684), False, 'import numpy\n'), ((8072, 8107), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', '(3, 2)'], {}), '(-1, 1, (3, 2))\n', (8092, 8107), False, 'import numpy\n'), ((8143, 8178), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', '(3, 2)'], {}), '(-1, 1, (3, 2))\n', (8163, 8178), False, 'import numpy\n'), ((9865, 9904), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'self.shape'], {}), '(-1, 1, self.shape)\n', (9885, 9904), False, 'import numpy\n'), ((9940, 
9979), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'self.shape'], {}), '(-1, 1, self.shape)\n', (9960, 9979), False, 'import numpy\n'), ((11532, 11571), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'self.shape'], {}), '(-1, 1, self.shape)\n', (11552, 11571), False, 'import numpy\n'), ((11607, 11646), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'self.shape'], {}), '(-1, 1, self.shape)\n', (11627, 11646), False, 'import numpy\n'), ((12190, 12225), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', '(3, 2)'], {}), '(-1, 1, (3, 2))\n', (12210, 12225), False, 'import numpy\n'), ((12261, 12296), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', '(3, 2)'], {}), '(-1, 1, (3, 2))\n', (12281, 12296), False, 'import numpy\n'), ((15543, 15582), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'self.shape'], {}), '(-1, 1, self.shape)\n', (15563, 15582), False, 'import numpy\n'), ((15618, 15657), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'self.shape'], {}), '(-1, 1, self.shape)\n', (15638, 15657), False, 'import numpy\n'), ((16215, 16250), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', '(3, 2)'], {}), '(-1, 1, (3, 2))\n', (16235, 16250), False, 'import numpy\n'), ((16286, 16321), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', '(3, 2)'], {}), '(-1, 1, (3, 2))\n', (16306, 16321), False, 'import numpy\n'), ((20264, 20303), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'self.shape'], {}), '(-1, 1, self.shape)\n', (20284, 20303), False, 'import numpy\n'), ((20339, 20378), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'self.shape'], {}), '(-1, 1, self.shape)\n', (20359, 20378), False, 'import numpy\n'), ((20992, 21011), 'unittest.SkipTest', 'unittest.SkipTest', ([], {}), '()\n', (21009, 21011), False, 'import unittest\n'), ((21142, 21161), 'unittest.SkipTest', 'unittest.SkipTest', 
([], {}), '()\n', (21159, 21161), False, 'import unittest\n'), ((23076, 23100), 'chainer.initializers.Constant', 'initializers.Constant', (['(1)'], {}), '(1)\n', (23097, 23100), False, 'from chainer import initializers\n'), ((25660, 25684), 'chainer.initializers.Constant', 'initializers.Constant', (['(0)'], {}), '(0)\n', (25681, 25684), False, 'from chainer import initializers\n'), ((30015, 30034), 'unittest.SkipTest', 'unittest.SkipTest', ([], {}), '()\n', (30032, 30034), False, 'import unittest\n'), ((8335, 8389), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'output_templates[1].shape'], {}), '(-1, 1, output_templates[1].shape)\n', (8355, 8389), False, 'import numpy\n'), ((8564, 8617), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'input_templates[0].shape'], {}), '(-1, 1, input_templates[0].shape)\n', (8584, 8617), False, 'import numpy\n'), ((12453, 12507), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'output_templates[1].shape'], {}), '(-1, 1, output_templates[1].shape)\n', (12473, 12507), False, 'import numpy\n'), ((16478, 16532), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'output_templates[1].shape'], {}), '(-1, 1, output_templates[1].shape)\n', (16498, 16532), False, 'import numpy\n'), ((16707, 16760), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', 'input_templates[0].shape'], {}), '(-1, 1, input_templates[0].shape)\n', (16727, 16760), False, 'import numpy\n'), ((25011, 25059), 'numpy.random.uniform', 'numpy.random.uniform', (['(-1)', '(1)', '(in_size, out_size)'], {}), '(-1, 1, (in_size, out_size))\n', (25031, 25059), False, 'import numpy\n'), ((25333, 25372), 'numpy.random.rand', 'numpy.random.rand', (['self.n', 'self.in_size'], {}), '(self.n, self.in_size)\n', (25350, 25372), False, 'import numpy\n'), ((25735, 25759), 'chainer.initializers.Constant', 'initializers.Constant', (['(1)'], {}), '(1)\n', (25756, 25759), False, 'from chainer import 
initializers\n'), ((27762, 27778), 'numpy.array', 'numpy.array', (['[1]'], {}), '([1])\n', (27773, 27778), False, 'import numpy\n'), ((27807, 27823), 'numpy.array', 'numpy.array', (['[1]'], {}), '([1])\n', (27818, 27823), False, 'import numpy\n'), ((28623, 28639), 'numpy.array', 'numpy.array', (['[1]'], {}), '([1])\n', (28634, 28639), False, 'import numpy\n'), ((28668, 28684), 'numpy.array', 'numpy.array', (['[1]'], {}), '([1])\n', (28679, 28684), False, 'import numpy\n')] |
#!/usr/bin/env python
# wujian@2019
"""
Compute labels for DC (Deep Clustering) training:
-1 means silence
0...N for each speaker
"""
import argparse
import numpy as np
from libs.opts import StftParser
from libs.data_handler import SpectrogramReader, NumpyWriter
from libs.utils import get_logger, EPSILON
logger = get_logger(__name__)
def run(args):
    """Compute DC (Deep Clustering) training labels.

    For every mixture utterance, each time-frequency bin is labelled -1 when
    it falls below the silence threshold (``args.beta`` dB under the peak),
    otherwise with the index of the speaker whose magnitude dominates that bin.
    Results are written as numpy arrays to ``args.dir``.
    """
    # STFT configuration shared by the mixture and speaker readers;
    # spectrograms come out as T x F magnitude arrays (apply_abs=True)
    stft_kwargs = {
        "frame_len": args.frame_len,
        "frame_hop": args.frame_hop,
        "round_power_of_two": args.round_power_of_two,
        "window": args.window,
        "center": args.center,
        "apply_abs": True,
    }
    speaker_scps = args.spks.split(",")
    if len(speaker_scps) < 2:
        raise RuntimeError("Please give at least 2 speakers")
    mixture_reader = SpectrogramReader(args.mix, **stft_kwargs)
    speaker_readers = [
        SpectrogramReader(scp, **stft_kwargs) for scp in speaker_scps
    ]
    with NumpyWriter(args.dir) as writer:
        for key, spectrogram in mixture_reader:
            num_frames, num_bins = spectrogram.shape
            masks = np.zeros_like(spectrogram, dtype=np.float32)
            # Magnitude in dB, floored at EPSILON to avoid log(0);
            # bins more than args.beta dB below the peak count as silence (-1)
            mag_db = 20 * np.log10(np.maximum(spectrogram, EPSILON))
            sil_idx = mag_db < (np.max(mag_db) - args.beta)
            masks[sil_idx] = -1
            logger.info("For {}, silence covered {:.2f}%".format(
                key,
                np.sum(sil_idx) * 100 / (num_frames * num_bins)))
            # Active bins get the index of the dominant speaker
            act_idx = ~sil_idx
            dominant = np.argmax(
                np.stack([reader[key] for reader in speaker_readers]),
                axis=0)
            masks[act_idx] = dominant[act_idx]
            writer.write(key, masks)
    logger.info("Processed {:d} utterances done".format(len(mixture_reader)))
if __name__ == "__main__":
    # Command-line front-end: STFT options are inherited from StftParser so
    # they stay consistent with the other tools in this package.
    parser = argparse.ArgumentParser(
        description="Command to compute labels for DC (Deep Clustering) "
        "training, -1 means silence, 0..N for each speaker",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        parents=[StftParser.parser])
    # Kaldi-style rspecifier for the mixture spectrograms
    parser.add_argument("mix", type=str, help="Rspecifier for mixture")
    # Comma-separated rspecifiers, one per source speaker
    parser.add_argument("spks",
                        type=str,
                        help="Rspecifier for multiple speakers, "
                        "separated by \',\', egs: spk1.scp,spk2.scp")
    parser.add_argument("dir",
                        type=str,
                        help="Directory to store computed labels")
    # Bins more than --beta dB below the spectrogram peak are marked silence
    parser.add_argument("--beta",
                        type=float,
                        default=40,
                        help="Threshold to discriminate silence bins (in dB)")
    args = parser.parse_args()
run(args) | [
"numpy.stack",
"libs.utils.get_logger",
"numpy.zeros_like",
"numpy.maximum",
"argparse.ArgumentParser",
"numpy.sum",
"libs.data_handler.SpectrogramReader",
"libs.data_handler.NumpyWriter",
"numpy.max"
] | [((327, 347), 'libs.utils.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (337, 347), False, 'from libs.utils import get_logger, EPSILON\n'), ((769, 811), 'libs.data_handler.SpectrogramReader', 'SpectrogramReader', (['args.mix'], {}), '(args.mix, **stft_kwargs)\n', (786, 811), False, 'from libs.data_handler import SpectrogramReader, NumpyWriter\n'), ((1790, 2029), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Command to compute labels for DC (Deep Clustering) training, -1 means silence, 0..N for each speaker"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter', 'parents': '[StftParser.parser]'}), "(description=\n 'Command to compute labels for DC (Deep Clustering) training, -1 means silence, 0..N for each speaker'\n , formatter_class=argparse.ArgumentDefaultsHelpFormatter, parents=[\n StftParser.parser])\n", (1813, 2029), False, 'import argparse\n'), ((830, 867), 'libs.data_handler.SpectrogramReader', 'SpectrogramReader', (['spk'], {}), '(spk, **stft_kwargs)\n', (847, 867), False, 'from libs.data_handler import SpectrogramReader, NumpyWriter\n'), ((899, 920), 'libs.data_handler.NumpyWriter', 'NumpyWriter', (['args.dir'], {}), '(args.dir)\n', (910, 920), False, 'from libs.data_handler import SpectrogramReader, NumpyWriter\n'), ((1017, 1053), 'numpy.zeros_like', 'np.zeros_like', (['mix'], {'dtype': 'np.float32'}), '(mix, dtype=np.float32)\n', (1030, 1053), True, 'import numpy as np\n'), ((1462, 1510), 'numpy.stack', 'np.stack', (['[reader[key] for reader in spk_reader]'], {}), '([reader[key] for reader in spk_reader])\n', (1470, 1510), True, 'import numpy as np\n'), ((1112, 1136), 'numpy.maximum', 'np.maximum', (['mix', 'EPSILON'], {}), '(mix, EPSILON)\n', (1122, 1136), True, 'import numpy as np\n'), ((1171, 1186), 'numpy.max', 'np.max', (['mix_2db'], {}), '(mix_2db)\n', (1177, 1186), True, 'import numpy as np\n'), ((1335, 1350), 'numpy.sum', 'np.sum', (['sil_idx'], {}), '(sil_idx)\n', (1341, 1350), 
True, 'import numpy as np\n')] |
# Copyright 2020 University of New South Wales, University of Sydney, Ingham Institute
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from loguru import logger
import numpy as np
import SimpleITK as sitk
from scipy.optimize import curve_fit
from scipy.ndimage import filters
from scipy.stats import norm as scipy_norm
from platipy.imaging.label.fusion import combine_labels, process_probability_image
from platipy.imaging.label.projection import (
evaluate_distance_on_surface,
evaluate_distance_to_reference,
regrid_spherical_data,
)
def median_absolute_deviation(data, axis=None):
    """Median Absolute Deviation: a "Robust" version of standard deviation.
    Indices variabililty of the sample.
    https://en.wikipedia.org/wiki/Median_absolute_deviation
    """
    # Median of the absolute deviations from the (axis-wise) median
    center = np.median(data, axis=axis)
    deviations = np.abs(data - center)
    return np.median(deviations, axis=axis)
def gaussian_curve(x, a, m, s):
    """Returns a Gaussian (normal) curve
    Args:
        x (np.ndarray): values to sample the normal curve
        a (float): magnitude
        m (float): location (mean)
        s (float): scale (standard deviation)
    Returns:
        np.ndarray: sampled values along the normal curve
    """
    # Normal density at x, scaled by the magnitude parameter
    density = scipy_norm.pdf(x, loc=m, scale=s)
    return a * density
def run_iar(
    atlas_set,
    reference_structure,
    smooth_distance_maps=False,
    smooth_sigma=1,
    z_score_statistic="MAD",
    outlier_method="IQR",
    min_best_atlases=10,
    outlier_factor=1.5,
    iteration=0,
    single_step=False,
    project_on_sphere=False,
    label="DIR",
):
    """
    Perform iterative atlas removal (IAR) on the atlas_set.

    On each pass the consensus (probabilistic) contour is built from all
    remaining atlases, every atlas delineation is compared to it through
    surface-distance statistics, a Q-metric (deviation of the z-score
    distribution from a fitted Gaussian) is computed per atlas, and atlases
    whose Q-metric exceeds the outlier limit are removed. The function
    recurses until a pass removes nothing (or after one pass if single_step).

    Args:
        atlas_set (dict): atlas id -> {label: {structure_name: image}}
        reference_structure (str): name of the structure being evaluated
        smooth_distance_maps (bool): smooth the projected distance maps
            (only used together with project_on_sphere)
        smooth_sigma (float): sigma of the smoothing filter
        z_score_statistic (str): "MAD" (robust) or "STD"
        outlier_method (str): "IQR" or "STD" rule for the outlier limit
        min_best_atlases (int): minimum number of best atlases kept when
            estimating the outlier limit
        outlier_factor (float): multiplier on the spread for the limit
        iteration (int): recursion depth; callers should leave this at 0
        single_step (bool): if True, perform only one removal pass
        project_on_sphere (bool): evaluate distances on a spherical grid
        label (str): key under which the deformed structures are stored

    Returns:
        dict: the (possibly reduced) atlas_set
    """

    if iteration == 0:
        # Run some checks in the data
        # If there is a MAJOR error we need to check
        # Begin the process
        logger.info("Iterative atlas removal: ")
        logger.info("  Beginning process")

    # Get remaining case identifiers to loop through
    remaining_id_list = list(atlas_set.keys())

    # Generate the surface projections
    # 1. Set the consensus surface using the reference volume
    probability_label = combine_labels(atlas_set, reference_structure, label=label)[
        reference_structure
    ]

    # Modify resolution for better statistics
    # BUGFIX: the narrower bound (< 7) must be tested first; with the previous
    # ordering the "< 12" test captured every count below 12, so the "< 7"
    # branch was unreachable.
    if project_on_sphere:
        if len(remaining_id_list) < 7:
            logger.info("    Less than 7 atlases, resolution set: 6x6 sqr deg")
            resolution = 6
        elif len(remaining_id_list) < 12:
            logger.info("    Less than 12 atlases, resolution set: 3x3 sqr deg")
            resolution = 3
        else:
            resolution = 1
    else:
        if len(remaining_id_list) < 7:
            # BUGFIX: message previously copy-pasted from the spherical branch
            # ("resolution set: 6x6 sqr deg") although it sets the resample factor
            logger.info("    Less than 7 atlases, resample factor set: 10")
            resample_factor = 10
        elif len(remaining_id_list) < 12:
            logger.info("    Less than 12 atlases, resample factor set: 5")
            resample_factor = 5
        else:
            resample_factor = 1

    g_val_list = []
    logger.info("  Calculating surface distance maps: ")
    for test_id in remaining_id_list:
        logger.info("    {0}".format(test_id))
        # 2. Calculate the distance from the surface to the consensus surface
        test_volume = atlas_set[test_id][label][reference_structure]

        # This next step ensures non-binary labels are treated properly
        # We use 0.1 to capture the outer edge of the test delineation, if it is probabilistic
        test_volume = process_probability_image(test_volume, 0.1)

        if project_on_sphere:
            reference_volume = process_probability_image(probability_label, threshold=0.999)
            # note: we use a threshold slightly below 1 to ensure the consensus (reference) volume
            # is a suitable binary volume

            # Compute the reference distance map
            reference_distance_map = sitk.Abs(
                sitk.SignedMaurerDistanceMap(
                    reference_volume, squaredDistance=False, useImageSpacing=True
                )
            )

            # Compute the distance to test surfaces, across the surface of the reference
            theta, phi, values = evaluate_distance_on_surface(
                reference_distance_map, test_volume, reference_as_distance_map=True
            )
            _, _, g_vals = regrid_spherical_data(theta, phi, values, resolution=resolution)

            g_val_list.append(g_vals)

        else:
            reference_volume = process_probability_image(probability_label, threshold=0.95)
            # note: we use a threshold slightly below 1 to ensure the consensus (reference) volume
            # is a suitable binary volume we have the flexibility to modify the reference volume
            # when we do not use spherical projection a larger surface means more evaluations and
            # better statistics, so we prefer a lower threshold but not too low,
            # or it may include some errors

            # Compute distance to reference, from the test volume
            values = evaluate_distance_to_reference(
                reference_volume, test_volume, resample_factor=resample_factor
            )

            g_val_list.append(values)

    q_results = {}

    for i, (test_id, g_vals) in enumerate(zip(remaining_id_list, g_val_list)):

        # Leave-one-out: compare each atlas against the distribution of the others
        g_val_list_test = g_val_list[:]
        g_val_list_test.pop(i)

        if project_on_sphere and smooth_distance_maps:
            # "wrap" mode respects the periodicity of the spherical grid
            g_vals = filters.gaussian_filter(g_vals, sigma=smooth_sigma, mode="wrap")

        # b) i] Compute the Z-scores over the projected surface
        if z_score_statistic.lower() == "std":
            g_val_mean = np.mean(g_val_list_test, axis=0)
            g_val_std = np.std(g_val_list_test, axis=0)

            if np.any(g_val_std == 0):
                logger.info("    Std Dev zero count: {0}".format(np.sum(g_val_std == 0)))
                g_val_std[g_val_std == 0] = g_val_std.mean()

            z_score_vals_array = (g_vals - g_val_mean) / g_val_std

        elif z_score_statistic.lower() == "mad":
            # 1.4826 scales the MAD to be a consistent estimator of the std dev
            g_val_median = np.median(g_val_list_test, axis=0)
            g_val_mad = 1.4826 * median_absolute_deviation(g_val_list_test, axis=0)

            if np.any(~np.isfinite(g_val_mad)):
                logger.info("Error in MAD")
                logger.info(g_val_mad)

            if np.any(g_val_mad == 0):
                logger.info("    MAD zero count: {0}".format(np.sum(g_val_mad == 0)))
                g_val_mad[g_val_mad == 0] = np.median(g_val_mad)

            z_score_vals_array = (g_vals - g_val_median) / g_val_mad

        else:
            logger.error("  Error!")
            logger.error("  z_score must be one of: MAD, STD")
            sys.exit()

        z_score_vals = np.ravel(z_score_vals_array)

        logger.debug("      [{0}] Statistics of mZ-scores".format(test_id))
        logger.debug("        Min(Z)    = {0:.2f}".format(z_score_vals.min()))
        logger.debug("        Q1(Z)     = {0:.2f}".format(np.percentile(z_score_vals, 25)))
        logger.debug("        Mean(Z)   = {0:.2f}".format(z_score_vals.mean()))
        logger.debug("        Median(Z) = {0:.2f}".format(np.percentile(z_score_vals, 50)))
        logger.debug("        Q3(Z)     = {0:.2f}".format(np.percentile(z_score_vals, 75)))
        logger.debug("        Max(Z)    = {0:.2f}\n".format(z_score_vals.max()))

        # Calculate excess area from Gaussian: the Q-metric
        bins = np.linspace(-15, 15, 501)
        z_density, bin_edges = np.histogram(z_score_vals, bins=bins, density=True)
        bin_centers = (bin_edges[1:] + bin_edges[:-1]) / 2.0

        try:
            popt, _ = curve_fit(  # pylint: disable=unbalanced-tuple-unpacking
                f=gaussian_curve, xdata=bin_centers, ydata=z_density
            )
            z_ideal = gaussian_curve(bin_centers, *popt)
            z_diff = np.abs(z_density - z_ideal)
        except (RuntimeError, ValueError):
            # curve_fit can fail on degenerate histograms; fall back to the
            # sampled mean/std of the density itself
            logger.debug("IAR couldnt fit curve, estimating with sampled statistics.")
            z_ideal = gaussian_curve(bin_centers, a=1, m=z_density.mean(), s=z_density.std())
            z_diff = np.abs(z_density - z_ideal)

        # Integrate to get the q_value
        # Deviations far from zero are up-weighted quadratically
        q_value = np.trapz(z_diff * np.abs(bin_centers) ** 2, bin_centers)
        q_results[test_id] = np.float64(q_value)

    # Exclude (at most) the worst 3 atlases for outlier detection
    # With a minimum number, this helps provide more robust estimates at low numbers
    result_list = list(q_results.values())
    result_list = [r for r in result_list if ~np.isnan(r) and np.isfinite(r)]
    best_results = np.sort(result_list)[: max([min_best_atlases, len(result_list) - 3])]

    if outlier_method.lower() == "iqr":
        # Tukey-style fence: Q3 + factor * IQR
        outlier_limit = np.percentile(best_results, 75, axis=0) + outlier_factor * np.subtract(
            *np.percentile(best_results, [75, 25], axis=0)
        )
    elif outlier_method.lower() == "std":
        outlier_limit = np.mean(best_results, axis=0) + outlier_factor * np.std(
            best_results, axis=0
        )
    else:
        logger.error("  Error!")
        logger.error("  outlier_method must be one of: IQR, STD")
        sys.exit()

    logger.info("  Analysing results")
    logger.info("   Outlier limit: {0:06.3f}".format(outlier_limit))
    keep_id_list = []

    logger.info(
        "{0},{1},{2},{3:.4g}\n".format(
            iteration,
            " ".join(remaining_id_list),
            " ".join(["{0:.4g}".format(i) for i in list(q_results.values())]),
            outlier_limit,
        )
    )

    for idx, result in q_results.items():
        accept = result <= outlier_limit

        logger.info(
            "      {0}: Q = {1:06.3f} [{2}]".format(
                idx, result, {True: "KEEP", False: "REMOVE"}[accept]
            )
        )

        if accept:
            keep_id_list.append(idx)

    if len(keep_id_list) < len(remaining_id_list):
        logger.info("\n  Step {0} Complete".format(iteration))
        logger.info("  Num. Removed = {0} --\n".format(len(remaining_id_list) - len(keep_id_list)))

        iteration += 1
        atlas_set_new = {i: atlas_set[i] for i in keep_id_list}

        if single_step:
            return atlas_set_new

        # Recurse on the reduced set; single_step is False here by construction
        return run_iar(
            atlas_set=atlas_set_new,
            reference_structure=reference_structure,
            smooth_distance_maps=smooth_distance_maps,
            smooth_sigma=smooth_sigma,
            z_score_statistic=z_score_statistic,
            outlier_method=outlier_method,
            min_best_atlases=min_best_atlases,
            outlier_factor=outlier_factor,
            iteration=iteration,
            project_on_sphere=project_on_sphere,
            label=label,
        )

    logger.info("  End point reached. Keeping:\n   {0}".format(keep_id_list))
    return atlas_set
| [
"numpy.abs",
"numpy.sum",
"numpy.ravel",
"numpy.isnan",
"numpy.histogram",
"numpy.mean",
"numpy.float64",
"platipy.imaging.label.fusion.combine_labels",
"numpy.std",
"platipy.imaging.label.projection.evaluate_distance_to_reference",
"numpy.isfinite",
"numpy.linspace",
"platipy.imaging.label.... | [((3467, 3519), 'loguru.logger.info', 'logger.info', (['""" Calculating surface distance maps: """'], {}), "(' Calculating surface distance maps: ')\n", (3478, 3519), False, 'from loguru import logger\n'), ((9678, 9712), 'loguru.logger.info', 'logger.info', (['""" Analysing results"""'], {}), "(' Analysing results')\n", (9689, 9712), False, 'from loguru import logger\n'), ((1719, 1752), 'scipy.stats.norm.pdf', 'scipy_norm.pdf', (['x'], {'loc': 'm', 'scale': 's'}), '(x, loc=m, scale=s)\n', (1733, 1752), True, 'from scipy.stats import norm as scipy_norm\n'), ((2274, 2314), 'loguru.logger.info', 'logger.info', (['"""Iterative atlas removal: """'], {}), "('Iterative atlas removal: ')\n", (2285, 2314), False, 'from loguru import logger\n'), ((2323, 2357), 'loguru.logger.info', 'logger.info', (['""" Beginning process"""'], {}), "(' Beginning process')\n", (2334, 2357), False, 'from loguru import logger\n'), ((2587, 2646), 'platipy.imaging.label.fusion.combine_labels', 'combine_labels', (['atlas_set', 'reference_structure'], {'label': 'label'}), '(atlas_set, reference_structure, label=label)\n', (2601, 2646), False, 'from platipy.imaging.label.fusion import combine_labels, process_probability_image\n'), ((3945, 3988), 'platipy.imaging.label.fusion.process_probability_image', 'process_probability_image', (['test_volume', '(0.1)'], {}), '(test_volume, 0.1)\n', (3970, 3988), False, 'from platipy.imaging.label.fusion import combine_labels, process_probability_image\n'), ((7225, 7253), 'numpy.ravel', 'np.ravel', (['z_score_vals_array'], {}), '(z_score_vals_array)\n', (7233, 7253), True, 'import numpy as np\n'), ((7923, 7948), 'numpy.linspace', 'np.linspace', (['(-15)', '(15)', '(501)'], {}), '(-15, 15, 501)\n', (7934, 7948), True, 'import numpy as np\n'), ((7980, 8031), 'numpy.histogram', 'np.histogram', (['z_score_vals'], {'bins': 'bins', 'density': '(True)'}), '(z_score_vals, bins=bins, density=True)\n', (7992, 8031), True, 'import numpy as 
np\n'), ((8793, 8812), 'numpy.float64', 'np.float64', (['q_value'], {}), '(q_value)\n', (8803, 8812), True, 'import numpy as np\n'), ((9105, 9125), 'numpy.sort', 'np.sort', (['result_list'], {}), '(result_list)\n', (9112, 9125), True, 'import numpy as np\n'), ((2807, 2873), 'loguru.logger.info', 'logger.info', (['""" Less than 12 atlases, resolution set: 3x3 sqr deg"""'], {}), "(' Less than 12 atlases, resolution set: 3x3 sqr deg')\n", (2818, 2873), False, 'from loguru import logger\n'), ((3150, 3211), 'loguru.logger.info', 'logger.info', (['""" Less than 12 atlases, resample factor set: 5"""'], {}), "(' Less than 12 atlases, resample factor set: 5')\n", (3161, 3211), False, 'from loguru import logger\n'), ((4051, 4112), 'platipy.imaging.label.fusion.process_probability_image', 'process_probability_image', (['probability_label'], {'threshold': '(0.999)'}), '(probability_label, threshold=0.999)\n', (4076, 4112), False, 'from platipy.imaging.label.fusion import combine_labels, process_probability_image\n'), ((4634, 4735), 'platipy.imaging.label.projection.evaluate_distance_on_surface', 'evaluate_distance_on_surface', (['reference_distance_map', 'test_volume'], {'reference_as_distance_map': '(True)'}), '(reference_distance_map, test_volume,\n reference_as_distance_map=True)\n', (4662, 4735), False, 'from platipy.imaging.label.projection import evaluate_distance_on_surface, evaluate_distance_to_reference, regrid_spherical_data\n'), ((4790, 4854), 'platipy.imaging.label.projection.regrid_spherical_data', 'regrid_spherical_data', (['theta', 'phi', 'values'], {'resolution': 'resolution'}), '(theta, phi, values, resolution=resolution)\n', (4811, 4854), False, 'from platipy.imaging.label.projection import evaluate_distance_on_surface, evaluate_distance_to_reference, regrid_spherical_data\n'), ((4939, 4999), 'platipy.imaging.label.fusion.process_probability_image', 'process_probability_image', (['probability_label'], {'threshold': '(0.95)'}), '(probability_label, 
threshold=0.95)\n', (4964, 4999), False, 'from platipy.imaging.label.fusion import combine_labels, process_probability_image\n'), ((5507, 5605), 'platipy.imaging.label.projection.evaluate_distance_to_reference', 'evaluate_distance_to_reference', (['reference_volume', 'test_volume'], {'resample_factor': 'resample_factor'}), '(reference_volume, test_volume,\n resample_factor=resample_factor)\n', (5537, 5605), False, 'from platipy.imaging.label.projection import evaluate_distance_on_surface, evaluate_distance_to_reference, regrid_spherical_data\n'), ((5920, 5984), 'scipy.ndimage.filters.gaussian_filter', 'filters.gaussian_filter', (['g_vals'], {'sigma': 'smooth_sigma', 'mode': '"""wrap"""'}), "(g_vals, sigma=smooth_sigma, mode='wrap')\n", (5943, 5984), False, 'from scipy.ndimage import filters\n'), ((6128, 6160), 'numpy.mean', 'np.mean', (['g_val_list_test'], {'axis': '(0)'}), '(g_val_list_test, axis=0)\n', (6135, 6160), True, 'import numpy as np\n'), ((6185, 6216), 'numpy.std', 'np.std', (['g_val_list_test'], {'axis': '(0)'}), '(g_val_list_test, axis=0)\n', (6191, 6216), True, 'import numpy as np\n'), ((6233, 6255), 'numpy.any', 'np.any', (['(g_val_std == 0)'], {}), '(g_val_std == 0)\n', (6239, 6255), True, 'import numpy as np\n'), ((8129, 8192), 'scipy.optimize.curve_fit', 'curve_fit', ([], {'f': 'gaussian_curve', 'xdata': 'bin_centers', 'ydata': 'z_density'}), '(f=gaussian_curve, xdata=bin_centers, ydata=z_density)\n', (8138, 8192), False, 'from scipy.optimize import curve_fit\n'), ((8348, 8375), 'numpy.abs', 'np.abs', (['(z_density - z_ideal)'], {}), '(z_density - z_ideal)\n', (8354, 8375), True, 'import numpy as np\n'), ((9240, 9279), 'numpy.percentile', 'np.percentile', (['best_results', '(75)'], {'axis': '(0)'}), '(best_results, 75, axis=0)\n', (9253, 9279), True, 'import numpy as np\n'), ((9565, 9588), 'loguru.logger.error', 'logger.error', (['""" Error!"""'], {}), "(' Error!')\n", (9577, 9588), False, 'from loguru import logger\n'), ((9597, 9653), 
'loguru.logger.error', 'logger.error', (['""" outlier_method must be one of: IQR, STD"""'], {}), "(' outlier_method must be one of: IQR, STD')\n", (9609, 9653), False, 'from loguru import logger\n'), ((9662, 9672), 'sys.exit', 'sys.exit', ([], {}), '()\n', (9670, 9672), False, 'import sys\n'), ((1330, 1356), 'numpy.median', 'np.median', (['data'], {'axis': 'axis'}), '(data, axis=axis)\n', (1339, 1356), True, 'import numpy as np\n'), ((2954, 3019), 'loguru.logger.info', 'logger.info', (['""" Less than 7 atlases, resolution set: 6x6 sqr deg"""'], {}), "(' Less than 7 atlases, resolution set: 6x6 sqr deg')\n", (2965, 3019), False, 'from loguru import logger\n'), ((3297, 3362), 'loguru.logger.info', 'logger.info', (['""" Less than 7 atlases, resolution set: 6x6 sqr deg"""'], {}), "(' Less than 7 atlases, resolution set: 6x6 sqr deg')\n", (3308, 3362), False, 'from loguru import logger\n'), ((4367, 4462), 'SimpleITK.SignedMaurerDistanceMap', 'sitk.SignedMaurerDistanceMap', (['reference_volume'], {'squaredDistance': '(False)', 'useImageSpacing': '(True)'}), '(reference_volume, squaredDistance=False,\n useImageSpacing=True)\n', (4395, 4462), True, 'import SimpleITK as sitk\n'), ((6553, 6587), 'numpy.median', 'np.median', (['g_val_list_test'], {'axis': '(0)'}), '(g_val_list_test, axis=0)\n', (6562, 6587), True, 'import numpy as np\n'), ((6820, 6842), 'numpy.any', 'np.any', (['(g_val_mad == 0)'], {}), '(g_val_mad == 0)\n', (6826, 6842), True, 'import numpy as np\n'), ((7092, 7115), 'loguru.logger.error', 'logger.error', (['""" Error!"""'], {}), "(' Error!')\n", (7104, 7115), False, 'from loguru import logger\n'), ((7128, 7177), 'loguru.logger.error', 'logger.error', (['""" z_score must be one of: MAD, STD"""'], {}), "(' z_score must be one of: MAD, STD')\n", (7140, 7177), False, 'from loguru import logger\n'), ((7190, 7200), 'sys.exit', 'sys.exit', ([], {}), '()\n', (7198, 7200), False, 'import sys\n'), ((7468, 7499), 'numpy.percentile', 'np.percentile', (['z_score_vals', 
'(25)'], {}), '(z_score_vals, 25)\n', (7481, 7499), True, 'import numpy as np\n'), ((7640, 7671), 'numpy.percentile', 'np.percentile', (['z_score_vals', '(50)'], {}), '(z_score_vals, 50)\n', (7653, 7671), True, 'import numpy as np\n'), ((7732, 7763), 'numpy.percentile', 'np.percentile', (['z_score_vals', '(75)'], {}), '(z_score_vals, 75)\n', (7745, 7763), True, 'import numpy as np\n'), ((8431, 8505), 'loguru.logger.debug', 'logger.debug', (['"""IAR couldnt fit curve, estimating with sampled statistics."""'], {}), "('IAR couldnt fit curve, estimating with sampled statistics.')\n", (8443, 8505), False, 'from loguru import logger\n'), ((8621, 8648), 'numpy.abs', 'np.abs', (['(z_density - z_ideal)'], {}), '(z_density - z_ideal)\n', (8627, 8648), True, 'import numpy as np\n'), ((9070, 9084), 'numpy.isfinite', 'np.isfinite', (['r'], {}), '(r)\n', (9081, 9084), True, 'import numpy as np\n'), ((9447, 9476), 'numpy.mean', 'np.mean', (['best_results'], {'axis': '(0)'}), '(best_results, axis=0)\n', (9454, 9476), True, 'import numpy as np\n'), ((6737, 6764), 'loguru.logger.info', 'logger.info', (['"""Error in MAD"""'], {}), "('Error in MAD')\n", (6748, 6764), False, 'from loguru import logger\n'), ((6781, 6803), 'loguru.logger.info', 'logger.info', (['g_val_mad'], {}), '(g_val_mad)\n', (6792, 6803), False, 'from loguru import logger\n'), ((6974, 6994), 'numpy.median', 'np.median', (['g_val_mad'], {}), '(g_val_mad)\n', (6983, 6994), True, 'import numpy as np\n'), ((8725, 8744), 'numpy.abs', 'np.abs', (['bin_centers'], {}), '(bin_centers)\n', (8731, 8744), True, 'import numpy as np\n'), ((9054, 9065), 'numpy.isnan', 'np.isnan', (['r'], {}), '(r)\n', (9062, 9065), True, 'import numpy as np\n'), ((9496, 9524), 'numpy.std', 'np.std', (['best_results'], {'axis': '(0)'}), '(best_results, axis=0)\n', (9502, 9524), True, 'import numpy as np\n'), ((6322, 6344), 'numpy.sum', 'np.sum', (['(g_val_std == 0)'], {}), '(g_val_std == 0)\n', (6328, 6344), True, 'import numpy as np\n'), ((6696, 
6718), 'numpy.isfinite', 'np.isfinite', (['g_val_mad'], {}), '(g_val_mad)\n', (6707, 6718), True, 'import numpy as np\n'), ((9325, 9370), 'numpy.percentile', 'np.percentile', (['best_results', '[75, 25]'], {'axis': '(0)'}), '(best_results, [75, 25], axis=0)\n', (9338, 9370), True, 'import numpy as np\n'), ((6905, 6927), 'numpy.sum', 'np.sum', (['(g_val_mad == 0)'], {}), '(g_val_mad == 0)\n', (6911, 6927), True, 'import numpy as np\n')] |
# Demo: draw ten uniform random samples, take their 0-th order
# difference (np.diff with n=0 hands the input back unchanged), print
# both arrays, and scatter-plot the pair with 'x' markers.
import numpy as np
import matplotlib.pyplot as plt

samples = np.random.rand(10)
zeroth_diff = np.diff(samples, 0)
print(samples)
print(zeroth_diff)
plt.plot(samples, zeroth_diff, 'x')
plt.show()
| [
"numpy.random.rand",
"numpy.diff",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot"
] | [((56, 74), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (70, 74), True, 'import numpy as np\n'), ((79, 92), 'numpy.diff', 'np.diff', (['x', '(0)'], {}), '(x, 0)\n', (86, 92), True, 'import numpy as np\n'), ((111, 130), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""x"""'], {}), "(x, y, 'x')\n", (119, 130), True, 'import matplotlib.pyplot as plt\n'), ((131, 141), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (139, 141), True, 'import matplotlib.pyplot as plt\n')] |
from collections import deque
import random
import numpy as np
import torch
import torch.nn as nn
import os
import sys
sys.path.append('../../')
from training.train_ddpg.ddpg_networks import ActorNet, CriticNet
class Agent:
    """
    Class for DDPG Agent
    Main Function:
        1. Remember: Insert new memory into the memory list
        2. Act: Generate New Action base on actor network
        3. Replay: Train networks base on mini-batch replay
        4. Save: Save actor network weights
        5. Load: Load actor network weights
    """
    def __init__(self,
                 state_num,
                 action_num,
                 rescale_state_num,
                 actor_net_dim=(256, 256, 256),
                 critic_net_dim=(512, 512, 512),
                 memory_size=1000,
                 batch_size=128,
                 target_tau=0.01,
                 target_update_steps=5,
                 reward_gamma=0.99,
                 actor_lr=0.0001,
                 critic_lr=0.0001,
                 epsilon_start=0.9,
                 epsilon_end=0.01,
                 epsilon_decay=0.999,
                 epsilon_rand_decay_start=60000,
                 epsilon_rand_decay_step=1,
                 poisson_window=50,
                 use_poisson=False,
                 use_cuda=True):
        """
        :param state_num: number of state
        :param action_num: number of action
        :param rescale_state_num: number of rescale state
        :param actor_net_dim: dimension of actor network
        :param critic_net_dim: dimension of critic network
        :param memory_size: size of memory
        :param batch_size: size of mini-batch
        :param target_tau: update rate for target network
        :param target_update_steps: update steps for target network
        :param reward_gamma: decay of future reward
        :param actor_lr: learning rate for actor network
        :param critic_lr: learning rate for critic network
        :param epsilon_start: max value for random action
        :param epsilon_end: min value for random action
        :param epsilon_decay: steps from max to min random action
        :param epsilon_rand_decay_start: start step for epsilon start to decay
        :param epsilon_rand_decay_step: steps between epsilon decay
        :param poisson_window: window of poisson spike
        :param use_poisson: if or not use poisson spike random
        :param use_cuda: if or not use gpu
        """
        self.state_num = state_num
        self.action_num = action_num
        self.rescale_state_num = rescale_state_num
        self.memory_size = memory_size
        self.batch_size = batch_size
        self.target_tau = target_tau
        self.target_update_steps = target_update_steps
        self.reward_gamma = reward_gamma
        self.actor_lr = actor_lr
        self.critic_lr = critic_lr
        self.epsilon_start = epsilon_start
        self.epsilon_end = epsilon_end
        self.epsilon_decay = epsilon_decay
        self.epsilon_rand_decay_start = epsilon_rand_decay_start
        self.epsilon_rand_decay_step = epsilon_rand_decay_step
        self.poisson_window = poisson_window
        self.use_poisson = use_poisson
        self.use_cuda = use_cuda
        '''
        Random Action
        '''
        # Current exploration-noise weight; annealed from epsilon_start
        # toward epsilon_end inside act() during training.
        self.epsilon = epsilon_start
        '''
        Device
        '''
        # Prefer GPU when requested and actually available; otherwise CPU.
        if self.use_cuda:
            self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        else:
            self.device = torch.device("cpu")
        """
        Memory
        """
        # Fixed-size replay buffer; oldest transitions are evicted first.
        self.memory = deque(maxlen=self.memory_size)
        """
        Networks and Target Networks
        """
        self.actor_net = ActorNet(self.rescale_state_num, self.action_num,
                                  hidden1=actor_net_dim[0],
                                  hidden2=actor_net_dim[1],
                                  hidden3=actor_net_dim[2])
        self.critic_net = CriticNet(self.state_num, self.action_num,
                                    hidden1=critic_net_dim[0],
                                    hidden2=critic_net_dim[1],
                                    hidden3=critic_net_dim[2])
        self.target_actor_net = ActorNet(self.rescale_state_num, self.action_num,
                                         hidden1=actor_net_dim[0],
                                         hidden2=actor_net_dim[1],
                                         hidden3=actor_net_dim[2])
        self.target_critic_net = CriticNet(self.state_num, self.action_num,
                                           hidden1=critic_net_dim[0],
                                           hidden2=critic_net_dim[1],
                                           hidden3=critic_net_dim[2])
        # Start the target networks as exact copies of the online networks.
        self._hard_update(self.target_actor_net, self.actor_net)
        self._hard_update(self.target_critic_net, self.critic_net)
        self.actor_net.to(self.device)
        self.critic_net.to(self.device)
        self.target_actor_net.to(self.device)
        self.target_critic_net.to(self.device)
        """
        Criterion and optimizers
        """
        self.criterion = nn.MSELoss()
        self.actor_optimizer = torch.optim.Adam(self.actor_net.parameters(), lr=self.actor_lr)
        self.critic_optimizer = torch.optim.Adam(self.critic_net.parameters(), lr=self.critic_lr)
        """
        Step Counter
        """
        # Number of replay() training iterations performed so far; drives
        # both epsilon decay and the periodic target-network soft updates.
        self.step_ita = 0

    def remember(self, state, rescale_state, action, reward, next_state, rescale_next_state, done):
        """
        Add New Memory Entry into memory deque
        :param state: current state (critic-network input)
        :param rescale_state: current rescaled state (actor-network input)
        :param action: current action
        :param reward: reward after action
        :param next_state: next state
        :param rescale_next_state: next rescaled state
        :param done: if is done
        """
        self.memory.append((state, rescale_state, action, reward, next_state, rescale_next_state, done))

    def act(self, state, explore=True, train=True):
        """
        Generate Action based on state
        :param state: current state
        :param explore: if or not do random explore
        :param train: if or not in training
        :return: action as a plain Python list, clipped to [0, 1] per
            dimension when noise was applied
        """
        with torch.no_grad():
            state = np.array(state)
            if self.use_poisson:
                state = self._state_2_poisson_state(state, 1)
            state = torch.Tensor(state.reshape((1, -1))).to(self.device)
            action = self.actor_net(state).to('cpu')
            action = action.numpy().squeeze()
        if train:
            # Anneal epsilon only after epsilon_rand_decay_start steps,
            # every epsilon_rand_decay_step training iterations.
            if self.step_ita > self.epsilon_rand_decay_start and self.epsilon > self.epsilon_end:
                if self.step_ita % self.epsilon_rand_decay_step == 0:
                    self.epsilon = self.epsilon * self.epsilon_decay
            # Blend Gaussian exploration noise with the policy output;
            # the noise share is epsilon, the policy share (1 - epsilon).
            noise = np.random.randn(self.action_num) * self.epsilon
            action = noise + (1 - self.epsilon) * action
            action = np.clip(action, [0., 0.], [1., 1.])
        elif explore:
            # Evaluation-time exploration uses the fixed floor epsilon_end.
            noise = np.random.randn(self.action_num) * self.epsilon_end
            action = noise + (1 - self.epsilon_end) * action
            action = np.clip(action, [0., 0.], [1., 1.])
        return action.tolist()

    def replay(self):
        """
        Experience Replay Training
        :return: actor_loss_item, critic_loss_item
        """
        state_batch, r_state_batch, action_batch, reward_batch, nstate_batch, r_nstate_batch, done_batch = self._random_minibatch()
        '''
        Compuate Target Q Value
        '''
        # Bellman target: r + gamma * Q'(s', pi'(s')); zeroed for terminal
        # transitions via the (1 - done) mask.
        with torch.no_grad():
            naction_batch = self.target_actor_net(r_nstate_batch)
            next_q = self.target_critic_net([nstate_batch, naction_batch])
            target_q = reward_batch + self.reward_gamma * next_q * (1. - done_batch)
        '''
        Update Critic Network
        '''
        self.critic_optimizer.zero_grad()
        current_q = self.critic_net([state_batch, action_batch])
        critic_loss = self.criterion(current_q, target_q)
        critic_loss_item = critic_loss.item()
        critic_loss.backward()
        self.critic_optimizer.step()
        '''
        Update Actor Network
        '''
        # Actor ascends the critic's Q-value: minimize -Q(s, pi(s)).
        self.actor_optimizer.zero_grad()
        current_action = self.actor_net(r_state_batch)
        actor_loss = -self.critic_net([state_batch, current_action])
        actor_loss = actor_loss.mean()
        actor_loss_item = actor_loss.item()
        actor_loss.backward()
        self.actor_optimizer.step()
        '''
        Update Target Networks
        '''
        # Soft-update the target networks every target_update_steps
        # training iterations.
        self.step_ita += 1
        if self.step_ita % self.target_update_steps == 0:
            self._soft_update(self.target_actor_net, self.actor_net)
            self._soft_update(self.target_critic_net, self.critic_net)
        return actor_loss_item, critic_loss_item

    def reset_epsilon(self, new_epsilon, new_decay):
        """
        Set Epsilon to a new value
        :param new_epsilon: new epsilon value
        :param new_decay: new epsilon decay
        """
        self.epsilon = new_epsilon
        self.epsilon_decay = new_decay

    def save(self, save_dir, episode, run_name):
        """
        Save Actor Net weights
        :param save_dir: directory for saving weights
        :param episode: number of episode
        :param run_name: name of the run
        """
        # Best-effort directory creation; an existing directory is fine.
        try:
            os.mkdir(save_dir)
            print("Directory ", save_dir, " Created")
        except FileExistsError:
            print("Directory", save_dir, " already exists")
        torch.save(self.actor_net.state_dict(),
                   save_dir + '/' + run_name + '_actor_network_s' + str(episode) + '.pt')
        print("Episode " + str(episode) + " weights saved ...")

    def load(self, load_file_name):
        """
        Load Actor Net weights
        :param load_file_name: weights file name
        """
        # Load on CPU first, then move the network back to the active device.
        self.actor_net.to('cpu')
        self.actor_net.load_state_dict(torch.load(load_file_name))
        self.actor_net.to(self.device)

    def _state_2_poisson_state(self, state_value, batch_size):
        """
        Transform state to spikes then transform back to state to add random
        :param state_value: state from environment transfer to firing rates of neurons
        :param batch_size: batch size
        :return: poisson_state
        """
        # Treat each state value (assumed in [0, 1] — it is used as a
        # Bernoulli probability) as a firing rate: draw poisson_window
        # random spikes per neuron, then convert the spike count back to
        # a rate in [0, 1]. This injects stochasticity into the state.
        spike_state_value = state_value.reshape((batch_size, self.rescale_state_num, 1))
        state_spikes = np.random.rand(batch_size, self.rescale_state_num, self.poisson_window) < spike_state_value
        poisson_state = np.sum(state_spikes, axis=2).reshape((batch_size, -1))
        poisson_state = poisson_state / self.poisson_window
        poisson_state = poisson_state.astype(float)
        return poisson_state

    def _random_minibatch(self):
        """
        Random select mini-batch from memory
        :return: state_batch, action_batch, reward_batch, nstate_batch, done_batch
        """
        # Sample without replacement, then repack the tuple fields into
        # dense per-field arrays before converting to device tensors.
        minibatch = random.sample(self.memory, self.batch_size)
        state_batch = np.zeros((self.batch_size, self.state_num))
        rescale_state_batch = np.zeros((self.batch_size, self.rescale_state_num))
        action_batch = np.zeros((self.batch_size, self.action_num))
        reward_batch = np.zeros((self.batch_size, 1))
        nstate_batch = np.zeros((self.batch_size, self.state_num))
        rescale_nstate_batch = np.zeros((self.batch_size, self.rescale_state_num))
        done_batch = np.zeros((self.batch_size, 1))
        for num in range(self.batch_size):
            state_batch[num, :] = np.array(minibatch[num][0])
            rescale_state_batch[num, :] = np.array(minibatch[num][1])
            action_batch[num, :] = np.array(minibatch[num][2])
            reward_batch[num, 0] = minibatch[num][3]
            nstate_batch[num, :] = np.array(minibatch[num][4])
            rescale_nstate_batch[num, :] = np.array(minibatch[num][5])
            done_batch[num, 0] = minibatch[num][6]
        # Optionally re-randomize the actor inputs via Poisson encoding.
        if self.use_poisson:
            rescale_state_batch = self._state_2_poisson_state(rescale_state_batch, self.batch_size)
            rescale_nstate_batch = self._state_2_poisson_state(rescale_nstate_batch, self.batch_size)
        state_batch = torch.Tensor(state_batch).to(self.device)
        rescale_state_batch = torch.Tensor(rescale_state_batch).to(self.device)
        action_batch = torch.Tensor(action_batch).to(self.device)
        reward_batch = torch.Tensor(reward_batch).to(self.device)
        nstate_batch = torch.Tensor(nstate_batch).to(self.device)
        rescale_nstate_batch = torch.Tensor(rescale_nstate_batch).to(self.device)
        done_batch = torch.Tensor(done_batch).to(self.device)
        return state_batch, rescale_state_batch, action_batch, reward_batch, nstate_batch, rescale_nstate_batch, done_batch

    def _hard_update(self, target, source):
        """
        Hard Update Weights from source network to target network
        :param target: target network
        :param source: source network
        """
        # target <- source (exact copy of every parameter).
        with torch.no_grad():
            for target_param, param in zip(target.parameters(), source.parameters()):
                target_param.data.copy_(param.data)

    def _soft_update(self, target, source):
        """
        Soft Update weights from source network to target network
        :param target: target network
        :param source: source network
        """
        # Polyak averaging: target <- (1 - tau) * target + tau * source.
        with torch.no_grad():
            for target_param, param in zip(target.parameters(), source.parameters()):
                target_param.data.copy_(
                    target_param.data * (1.0 - self.target_tau) + param.data * self.target_tau
                )
| [
"sys.path.append",
"os.mkdir",
"torch.nn.MSELoss",
"training.train_ddpg.ddpg_networks.ActorNet",
"numpy.sum",
"numpy.random.randn",
"random.sample",
"torch.load",
"training.train_ddpg.ddpg_networks.CriticNet",
"numpy.zeros",
"numpy.clip",
"torch.Tensor",
"numpy.array",
"torch.cuda.is_avail... | [((119, 144), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (134, 144), False, 'import sys\n'), ((3606, 3636), 'collections.deque', 'deque', ([], {'maxlen': 'self.memory_size'}), '(maxlen=self.memory_size)\n', (3611, 3636), False, 'from collections import deque\n'), ((3723, 3854), 'training.train_ddpg.ddpg_networks.ActorNet', 'ActorNet', (['self.rescale_state_num', 'self.action_num'], {'hidden1': 'actor_net_dim[0]', 'hidden2': 'actor_net_dim[1]', 'hidden3': 'actor_net_dim[2]'}), '(self.rescale_state_num, self.action_num, hidden1=actor_net_dim[0],\n hidden2=actor_net_dim[1], hidden3=actor_net_dim[2])\n', (3731, 3854), False, 'from training.train_ddpg.ddpg_networks import ActorNet, CriticNet\n'), ((3979, 4106), 'training.train_ddpg.ddpg_networks.CriticNet', 'CriticNet', (['self.state_num', 'self.action_num'], {'hidden1': 'critic_net_dim[0]', 'hidden2': 'critic_net_dim[1]', 'hidden3': 'critic_net_dim[2]'}), '(self.state_num, self.action_num, hidden1=critic_net_dim[0],\n hidden2=critic_net_dim[1], hidden3=critic_net_dim[2])\n', (3988, 4106), False, 'from training.train_ddpg.ddpg_networks import ActorNet, CriticNet\n'), ((4243, 4374), 'training.train_ddpg.ddpg_networks.ActorNet', 'ActorNet', (['self.rescale_state_num', 'self.action_num'], {'hidden1': 'actor_net_dim[0]', 'hidden2': 'actor_net_dim[1]', 'hidden3': 'actor_net_dim[2]'}), '(self.rescale_state_num, self.action_num, hidden1=actor_net_dim[0],\n hidden2=actor_net_dim[1], hidden3=actor_net_dim[2])\n', (4251, 4374), False, 'from training.train_ddpg.ddpg_networks import ActorNet, CriticNet\n'), ((4527, 4654), 'training.train_ddpg.ddpg_networks.CriticNet', 'CriticNet', (['self.state_num', 'self.action_num'], {'hidden1': 'critic_net_dim[0]', 'hidden2': 'critic_net_dim[1]', 'hidden3': 'critic_net_dim[2]'}), '(self.state_num, self.action_num, hidden1=critic_net_dim[0],\n hidden2=critic_net_dim[1], hidden3=critic_net_dim[2])\n', (4536, 4654), False, 'from 
training.train_ddpg.ddpg_networks import ActorNet, CriticNet\n'), ((5166, 5178), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (5176, 5178), True, 'import torch.nn as nn\n'), ((10941, 10984), 'random.sample', 'random.sample', (['self.memory', 'self.batch_size'], {}), '(self.memory, self.batch_size)\n', (10954, 10984), False, 'import random\n'), ((11007, 11050), 'numpy.zeros', 'np.zeros', (['(self.batch_size, self.state_num)'], {}), '((self.batch_size, self.state_num))\n', (11015, 11050), True, 'import numpy as np\n'), ((11081, 11132), 'numpy.zeros', 'np.zeros', (['(self.batch_size, self.rescale_state_num)'], {}), '((self.batch_size, self.rescale_state_num))\n', (11089, 11132), True, 'import numpy as np\n'), ((11156, 11200), 'numpy.zeros', 'np.zeros', (['(self.batch_size, self.action_num)'], {}), '((self.batch_size, self.action_num))\n', (11164, 11200), True, 'import numpy as np\n'), ((11224, 11254), 'numpy.zeros', 'np.zeros', (['(self.batch_size, 1)'], {}), '((self.batch_size, 1))\n', (11232, 11254), True, 'import numpy as np\n'), ((11278, 11321), 'numpy.zeros', 'np.zeros', (['(self.batch_size, self.state_num)'], {}), '((self.batch_size, self.state_num))\n', (11286, 11321), True, 'import numpy as np\n'), ((11353, 11404), 'numpy.zeros', 'np.zeros', (['(self.batch_size, self.rescale_state_num)'], {}), '((self.batch_size, self.rescale_state_num))\n', (11361, 11404), True, 'import numpy as np\n'), ((11426, 11456), 'numpy.zeros', 'np.zeros', (['(self.batch_size, 1)'], {}), '((self.batch_size, 1))\n', (11434, 11456), True, 'import numpy as np\n'), ((3525, 3544), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3537, 3544), False, 'import torch\n'), ((6193, 6208), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6206, 6208), False, 'import torch\n'), ((6230, 6245), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (6238, 6245), True, 'import numpy as np\n'), ((6914, 6953), 'numpy.clip', 'np.clip', (['action', '[0.0, 0.0]', '[1.0, 1.0]'], 
{}), '(action, [0.0, 0.0], [1.0, 1.0])\n', (6921, 6953), True, 'import numpy as np\n'), ((7527, 7542), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7540, 7542), False, 'import torch\n'), ((9343, 9361), 'os.mkdir', 'os.mkdir', (['save_dir'], {}), '(save_dir)\n', (9351, 9361), False, 'import os\n'), ((9923, 9949), 'torch.load', 'torch.load', (['load_file_name'], {}), '(load_file_name)\n', (9933, 9949), False, 'import torch\n'), ((10423, 10494), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'self.rescale_state_num', 'self.poisson_window'], {}), '(batch_size, self.rescale_state_num, self.poisson_window)\n', (10437, 10494), True, 'import numpy as np\n'), ((11534, 11561), 'numpy.array', 'np.array', (['minibatch[num][0]'], {}), '(minibatch[num][0])\n', (11542, 11561), True, 'import numpy as np\n'), ((11604, 11631), 'numpy.array', 'np.array', (['minibatch[num][1]'], {}), '(minibatch[num][1])\n', (11612, 11631), True, 'import numpy as np\n'), ((11667, 11694), 'numpy.array', 'np.array', (['minibatch[num][2]'], {}), '(minibatch[num][2])\n', (11675, 11694), True, 'import numpy as np\n'), ((11783, 11810), 'numpy.array', 'np.array', (['minibatch[num][4]'], {}), '(minibatch[num][4])\n', (11791, 11810), True, 'import numpy as np\n'), ((11854, 11881), 'numpy.array', 'np.array', (['minibatch[num][5]'], {}), '(minibatch[num][5])\n', (11862, 11881), True, 'import numpy as np\n'), ((12998, 13013), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13011, 13013), False, 'import torch\n'), ((13377, 13392), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13390, 13392), False, 'import torch\n'), ((6788, 6820), 'numpy.random.randn', 'np.random.randn', (['self.action_num'], {}), '(self.action_num)\n', (6803, 6820), True, 'import numpy as np\n'), ((7126, 7165), 'numpy.clip', 'np.clip', (['action', '[0.0, 0.0]', '[1.0, 1.0]'], {}), '(action, [0.0, 0.0], [1.0, 1.0])\n', (7133, 7165), True, 'import numpy as np\n'), ((10539, 10567), 'numpy.sum', 'np.sum', 
(['state_spikes'], {'axis': '(2)'}), '(state_spikes, axis=2)\n', (10545, 10567), True, 'import numpy as np\n'), ((12186, 12211), 'torch.Tensor', 'torch.Tensor', (['state_batch'], {}), '(state_batch)\n', (12198, 12211), False, 'import torch\n'), ((12258, 12291), 'torch.Tensor', 'torch.Tensor', (['rescale_state_batch'], {}), '(rescale_state_batch)\n', (12270, 12291), False, 'import torch\n'), ((12331, 12357), 'torch.Tensor', 'torch.Tensor', (['action_batch'], {}), '(action_batch)\n', (12343, 12357), False, 'import torch\n'), ((12397, 12423), 'torch.Tensor', 'torch.Tensor', (['reward_batch'], {}), '(reward_batch)\n', (12409, 12423), False, 'import torch\n'), ((12463, 12489), 'torch.Tensor', 'torch.Tensor', (['nstate_batch'], {}), '(nstate_batch)\n', (12475, 12489), False, 'import torch\n'), ((12537, 12571), 'torch.Tensor', 'torch.Tensor', (['rescale_nstate_batch'], {}), '(rescale_nstate_batch)\n', (12549, 12571), False, 'import torch\n'), ((12609, 12633), 'torch.Tensor', 'torch.Tensor', (['done_batch'], {}), '(done_batch)\n', (12621, 12633), False, 'import torch\n'), ((3447, 3472), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3470, 3472), False, 'import torch\n'), ((6992, 7024), 'numpy.random.randn', 'np.random.randn', (['self.action_num'], {}), '(self.action_num)\n', (7007, 7024), True, 'import numpy as np\n')] |
# Demo: trace a 3-D curve — sine and cosine against a linear parameter
# spanning six full periods on each side of zero — using mplot3d.
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np

theta = np.linspace(-6 * np.pi, 6 * np.pi, 1000)
sines = np.sin(theta)
cosines = np.cos(theta)

figure = plt.figure()
axes_3d = Axes3D(figure)
axes_3d.plot(theta, sines, cosines)
plt.show()
| [
"matplotlib.pyplot.show",
"mpl_toolkits.mplot3d.Axes3D",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.cos",
"numpy.linspace"
] | [((97, 137), 'numpy.linspace', 'np.linspace', (['(-6 * np.pi)', '(6 * np.pi)', '(1000)'], {}), '(-6 * np.pi, 6 * np.pi, 1000)\n', (108, 137), True, 'import numpy as np\n'), ((142, 151), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (148, 151), True, 'import numpy as np\n'), ((156, 165), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (162, 165), True, 'import numpy as np\n'), ((173, 185), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (183, 185), True, 'import matplotlib.pyplot as plt\n'), ((191, 202), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (197, 202), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), ((222, 232), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (230, 232), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
import argparse
import time
from pymouse import PyMouse
from pykeyboard import PyKeyboard
import cv2 as cv
import numpy as np
import mediapipe as mp
from utils import CvFpsCalc
def get_args():
    """Parse command-line options for the hand-tracking demo.

    Returns:
        argparse.Namespace carrying the camera settings
        (device/width/height), the MediaPipe Hands parameters
        (max_num_hands and both confidence thresholds), and the
        --use_brect drawing flag.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument("--device", type=int, default=0)
    parser.add_argument("--width", help='cap width', type=int, default=960)
    parser.add_argument("--height", help='cap height', type=int, default=540)

    parser.add_argument("--max_num_hands", type=int, default=2)
    parser.add_argument("--min_detection_confidence",
                        help='min_detection_confidence',
                        type=float,
                        default=0.7)
    # BUG FIX: this threshold is a probability in [0, 1]. The original
    # declared type=int (with a float default of 0.5), so any value
    # supplied on the command line, e.g. "--min_tracking_confidence 0.5",
    # failed int() conversion and aborted argument parsing. It must be
    # float, matching --min_detection_confidence above.
    parser.add_argument("--min_tracking_confidence",
                        help='min_tracking_confidence',
                        type=float,
                        default=0.5)

    parser.add_argument('--use_brect', action='store_true')

    args = parser.parse_args()

    return args
def main():
    """Run the webcam hand-tracking demo loop.

    Tracks hands with MediaPipe, draws landmarks/FPS on a mirrored
    preview window, and maps horizontal palm-centroid swipes (more than
    120 px between processed frames) to right/left arrow-key taps via
    PyKeyboard. Press ESC to quit.
    """
    # Argument parsing ######################################################
    args = get_args()

    cap_device = args.device
    cap_width = args.width
    cap_height = args.height

    max_num_hands = args.max_num_hands
    min_detection_confidence = args.min_detection_confidence
    min_tracking_confidence = args.min_tracking_confidence

    use_brect = args.use_brect
    # Palm-centroid positions of the current and previous frame, used
    # for the swipe-gesture detection below.
    now = [0, 0]
    past = [0,0]
    count=0
    # Camera preparation ####################################################
    cap = cv.VideoCapture(cap_device)
    cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width)
    cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height)

    # Model loading #########################################################
    mp_hands = mp.solutions.hands
    hands = mp_hands.Hands(
        max_num_hands=max_num_hands,
        min_detection_confidence=min_detection_confidence,
        min_tracking_confidence=min_tracking_confidence,
    )

    # FPS measurement module ################################################
    cvFpsCalc = CvFpsCalc(buffer_len=10)
    k = PyKeyboard()
    m = PyMouse()  # NOTE(review): created but never used in this loop
    while True:
        display_fps = cvFpsCalc.get()

        # Camera capture ####################################################
        ret, image = cap.read()
        if not ret:
            break
        image = cv.flip(image, 1)  # mirror the preview
        debug_image = copy.deepcopy(image)

        # Run detection #####################################################
        image = cv.cvtColor(image, cv.COLOR_BGR2RGB)
        results = hands.process(image)

        # Drawing ###########################################################
        if results.multi_hand_landmarks is not None:
            for hand_landmarks, handedness in zip(results.multi_hand_landmarks,
                                                  results.multi_handedness):
                # Palm centroid calculation
                cx, cy = calc_palm_moment(debug_image, hand_landmarks)
                # Bounding rectangle calculation
                brect = calc_bounding_rect(debug_image, hand_landmarks)
                # Draw landmarks and (optionally) the bounding rectangle
                debug_image = draw_landmarks(debug_image, cx, cy,
                                              hand_landmarks, handedness)
                debug_image = draw_bounding_rect(use_brect, debug_image, brect)
                # Remember the centroid movement for gesture detection
                past = now
                now = [cx, cy]
        cv.putText(debug_image, "FPS:" + str(display_fps), (10, 30),
                   cv.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), 2, cv.LINE_AA)

        # Key handling (ESC: quit) ##########################################
        key = cv.waitKey(1)
        if key == 27:  # ESC
            break
        count=count+1
        print(now)
        print(past)
        print('\n')
        # Swipe gesture: a > 120 px horizontal centroid jump taps the
        # right/left arrow key (skipped for the first frames and while
        # no hand has been seen yet).
        if count>2 and results.multi_hand_landmarks is not None:
            if now[0]-past[0]>120 and past!=[0,0]:
                k.tap_key(k.right_key)
                time.sleep(0.5)
            if now[0]-past[0]<-120 and past!=[0,0]:
                k.tap_key(k.left_key)
                time.sleep(0.5)

        # Screen update #####################################################
        cv.imshow('MediaPipe Hand Demo', debug_image)

    cap.release()
    cv.destroyAllWindows()
def calc_palm_moment(image, landmarks):
    """Return the (cx, cy) pixel centroid of the palm region.

    The palm is approximated by six landmarks — the two wrist points
    (0, 1) and the base joint of each finger (5, 9, 13, 17).  Their
    moments (cv.moments) give the centroid; (0, 0) is returned when
    moment m00 is zero (degenerate point set).

    :param image: BGR frame, used only for its width/height to clamp
        the normalized landmark coordinates into pixel range
    :param landmarks: MediaPipe hand landmark list (normalized coords)
    :return: (cx, cy) palm-centroid pixel coordinates
    """
    image_width, image_height = image.shape[1], image.shape[0]

    # Wrist and finger-base landmarks that outline the palm.  The
    # original repeated six identical `if index == k` branches and grew
    # the array with np.append on every hit; collapsed into one pass.
    PALM_INDICES = (0, 1, 5, 9, 13, 17)
    points = []
    for index, landmark in enumerate(landmarks.landmark):
        if index in PALM_INDICES:
            landmark_x = min(int(landmark.x * image_width), image_width - 1)
            landmark_y = min(int(landmark.y * image_height), image_height - 1)
            points.append((landmark_x, landmark_y))
    palm_array = np.array(points, int).reshape(-1, 2)

    M = cv.moments(palm_array)
    cx, cy = 0, 0
    if M['m00'] != 0:
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
    return cx, cy
def calc_bounding_rect(image, landmarks):
    """Compute the axis-aligned bounding box of all hand landmarks.

    :param image: frame whose width/height clamp the pixel coordinates
    :param landmarks: MediaPipe hand landmark list (normalized coords)
    :return: [x_min, y_min, x_max, y_max] in pixels
    """
    image_width, image_height = image.shape[1], image.shape[0]

    # Convert every normalized landmark to clamped pixel coordinates in
    # a single pass; the original grew the array with np.append inside
    # the loop (a fresh allocation per landmark).
    landmark_array = np.array(
        [[min(int(landmark.x * image_width), image_width - 1),
          min(int(landmark.y * image_height), image_height - 1)]
         for landmark in landmarks.landmark],
        dtype=int,
    ).reshape(-1, 2)

    x, y, w, h = cv.boundingRect(landmark_array)

    # cv.boundingRect yields (x, y, width, height); callers want corners.
    return [x, y, x + w, y + h]
def draw_landmarks(image, cx, cy, landmarks, handedness):
    """Draw hand landmarks, the skeleton, and the palm centroid.

    Every visible landmark gets a small green circle; the five
    fingertips (indices 4, 8, 12, 16, 20) additionally get a larger
    circle.  The skeleton is drawn as line segments between connected
    landmarks, and the palm centroid (cx, cy) is annotated with the
    first character of the hand's left/right label.

    :param image: BGR frame to draw on (modified in place)
    :param cx: palm-centroid x in pixels
    :param cy: palm-centroid y in pixels
    :param landmarks: MediaPipe hand landmark list
    :param handedness: MediaPipe handedness classification for the hand
    :return: the annotated image
    """
    image_width, image_height = image.shape[1], image.shape[0]

    landmark_point = []

    # Landmark circles.  The original drew the same 5-px circle in 21
    # separate `if index == k` branches; only the fingertip indices
    # actually differ (they get an extra 12-px circle).
    FINGERTIP_INDICES = (4, 8, 12, 16, 20)
    for index, landmark in enumerate(landmarks.landmark):
        if landmark.visibility < 0 or landmark.presence < 0:
            continue
        landmark_x = min(int(landmark.x * image_width), image_width - 1)
        landmark_y = min(int(landmark.y * image_height), image_height - 1)
        landmark_point.append((landmark_x, landmark_y))

        cv.circle(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)
        if index in FINGERTIP_INDICES:
            cv.circle(image, (landmark_x, landmark_y), 12, (0, 255, 0), 2)

    # Skeleton: pairs of landmark indices to connect, replacing the 21
    # hand-written cv.line calls of the original (same segments, same
    # order).
    if len(landmark_point) > 0:
        CONNECTIONS = (
            (2, 3), (3, 4),                      # thumb
            (5, 6), (6, 7), (7, 8),              # index finger
            (9, 10), (10, 11), (11, 12),         # middle finger
            (13, 14), (14, 15), (15, 16),        # ring finger
            (17, 18), (18, 19), (19, 20),        # little finger
            (0, 1), (1, 2), (2, 5), (5, 9),      # palm
            (9, 13), (13, 17), (17, 0),
        )
        for start, end in CONNECTIONS:
            cv.line(image, landmark_point[start], landmark_point[end],
                    (0, 255, 0), 2)

    # Palm centroid + handedness label.
    if len(landmark_point) > 0:
        # handedness.classification[0].index
        # handedness.classification[0].score
        cv.circle(image, (cx, cy), 12, (0, 255, 0), 2)
        cv.putText(image, handedness.classification[0].label[0],
                   (cx - 6, cy + 6), cv.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0),
                   2, cv.LINE_AA)  # label[0]: first character only

    return image
def draw_bounding_rect(use_brect, image, brect):
    """Optionally draw the hand's bounding rectangle.

    :param use_brect: when falsy, the image is returned untouched
    :param image: BGR frame to draw on (modified in place)
    :param brect: [x_min, y_min, x_max, y_max] corner coordinates
    :return: the (possibly annotated) image
    """
    if not use_brect:
        return image
    top_left = (brect[0], brect[1])
    bottom_right = (brect[2], brect[3])
    cv.rectangle(image, top_left, bottom_right, (0, 255, 0), 2)
    return image
# Script entry point: run the demo loop only when executed directly.
if __name__ == '__main__':
    main()
| [
"argparse.ArgumentParser",
"numpy.empty",
"pymouse.PyMouse",
"cv2.rectangle",
"cv2.imshow",
"cv2.line",
"cv2.cvtColor",
"numpy.append",
"cv2.boundingRect",
"cv2.destroyAllWindows",
"copy.deepcopy",
"cv2.circle",
"cv2.waitKey",
"pykeyboard.PyKeyboard",
"time.sleep",
"cv2.flip",
"cv2.p... | [((267, 292), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (290, 292), False, 'import argparse\n'), ((1568, 1595), 'cv2.VideoCapture', 'cv.VideoCapture', (['cap_device'], {}), '(cap_device)\n', (1583, 1595), True, 'import cv2 as cv\n'), ((2077, 2101), 'utils.CvFpsCalc', 'CvFpsCalc', ([], {'buffer_len': '(10)'}), '(buffer_len=10)\n', (2086, 2101), False, 'from utils import CvFpsCalc\n'), ((2111, 2123), 'pykeyboard.PyKeyboard', 'PyKeyboard', ([], {}), '()\n', (2121, 2123), False, 'from pykeyboard import PyKeyboard\n'), ((2132, 2141), 'pymouse.PyMouse', 'PyMouse', ([], {}), '()\n', (2139, 2141), False, 'from pymouse import PyMouse\n'), ((4218, 4240), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (4238, 4240), True, 'import cv2 as cv\n'), ((4364, 4385), 'numpy.empty', 'np.empty', (['(0, 2)', 'int'], {}), '((0, 2), int)\n', (4372, 4385), True, 'import numpy as np\n'), ((5282, 5304), 'cv2.moments', 'cv.moments', (['palm_array'], {}), '(palm_array)\n', (5292, 5304), True, 'import cv2 as cv\n'), ((5569, 5590), 'numpy.empty', 'np.empty', (['(0, 2)', 'int'], {}), '((0, 2), int)\n', (5577, 5590), True, 'import numpy as np\n'), ((5951, 5982), 'cv2.boundingRect', 'cv.boundingRect', (['landmark_array'], {}), '(landmark_array)\n', (5966, 5982), True, 'import cv2 as cv\n'), ((2354, 2371), 'cv2.flip', 'cv.flip', (['image', '(1)'], {}), '(image, 1)\n', (2361, 2371), True, 'import cv2 as cv\n'), ((2402, 2422), 'copy.deepcopy', 'copy.deepcopy', (['image'], {}), '(image)\n', (2415, 2422), False, 'import copy\n'), ((2517, 2553), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2RGB'], {}), '(image, cv.COLOR_BGR2RGB)\n', (2528, 2553), True, 'import cv2 as cv\n'), ((3611, 3624), 'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (3621, 3624), True, 'import cv2 as cv\n'), ((4149, 4194), 'cv2.imshow', 'cv.imshow', (['"""MediaPipe Hand Demo"""', 'debug_image'], {}), "('MediaPipe Hand Demo', debug_image)\n", (4158, 4194), 
True, 'import cv2 as cv\n'), ((5883, 5932), 'numpy.append', 'np.append', (['landmark_array', 'landmark_point'], {'axis': '(0)'}), '(landmark_array, landmark_point, axis=0)\n', (5892, 5932), True, 'import numpy as np\n'), ((9254, 9322), 'cv2.line', 'cv.line', (['image', 'landmark_point[2]', 'landmark_point[3]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[2], landmark_point[3], (0, 255, 0), 2)\n', (9261, 9322), True, 'import cv2 as cv\n'), ((9331, 9399), 'cv2.line', 'cv.line', (['image', 'landmark_point[3]', 'landmark_point[4]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[3], landmark_point[4], (0, 255, 0), 2)\n', (9338, 9399), True, 'import cv2 as cv\n'), ((9422, 9490), 'cv2.line', 'cv.line', (['image', 'landmark_point[5]', 'landmark_point[6]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[5], landmark_point[6], (0, 255, 0), 2)\n', (9429, 9490), True, 'import cv2 as cv\n'), ((9499, 9567), 'cv2.line', 'cv.line', (['image', 'landmark_point[6]', 'landmark_point[7]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[6], landmark_point[7], (0, 255, 0), 2)\n', (9506, 9567), True, 'import cv2 as cv\n'), ((9576, 9644), 'cv2.line', 'cv.line', (['image', 'landmark_point[7]', 'landmark_point[8]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[7], landmark_point[8], (0, 255, 0), 2)\n', (9583, 9644), True, 'import cv2 as cv\n'), ((9667, 9736), 'cv2.line', 'cv.line', (['image', 'landmark_point[9]', 'landmark_point[10]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[9], landmark_point[10], (0, 255, 0), 2)\n', (9674, 9736), True, 'import cv2 as cv\n'), ((9745, 9815), 'cv2.line', 'cv.line', (['image', 'landmark_point[10]', 'landmark_point[11]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[10], landmark_point[11], (0, 255, 0), 2)\n', (9752, 9815), True, 'import cv2 as cv\n'), ((9824, 9894), 'cv2.line', 'cv.line', (['image', 'landmark_point[11]', 'landmark_point[12]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[11], landmark_point[12], 
(0, 255, 0), 2)\n', (9831, 9894), True, 'import cv2 as cv\n'), ((9918, 9988), 'cv2.line', 'cv.line', (['image', 'landmark_point[13]', 'landmark_point[14]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[13], landmark_point[14], (0, 255, 0), 2)\n', (9925, 9988), True, 'import cv2 as cv\n'), ((9997, 10067), 'cv2.line', 'cv.line', (['image', 'landmark_point[14]', 'landmark_point[15]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[14], landmark_point[15], (0, 255, 0), 2)\n', (10004, 10067), True, 'import cv2 as cv\n'), ((10076, 10146), 'cv2.line', 'cv.line', (['image', 'landmark_point[15]', 'landmark_point[16]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[15], landmark_point[16], (0, 255, 0), 2)\n', (10083, 10146), True, 'import cv2 as cv\n'), ((10169, 10239), 'cv2.line', 'cv.line', (['image', 'landmark_point[17]', 'landmark_point[18]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[17], landmark_point[18], (0, 255, 0), 2)\n', (10176, 10239), True, 'import cv2 as cv\n'), ((10248, 10318), 'cv2.line', 'cv.line', (['image', 'landmark_point[18]', 'landmark_point[19]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[18], landmark_point[19], (0, 255, 0), 2)\n', (10255, 10318), True, 'import cv2 as cv\n'), ((10327, 10397), 'cv2.line', 'cv.line', (['image', 'landmark_point[19]', 'landmark_point[20]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[19], landmark_point[20], (0, 255, 0), 2)\n', (10334, 10397), True, 'import cv2 as cv\n'), ((10420, 10488), 'cv2.line', 'cv.line', (['image', 'landmark_point[0]', 'landmark_point[1]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[0], landmark_point[1], (0, 255, 0), 2)\n', (10427, 10488), True, 'import cv2 as cv\n'), ((10497, 10565), 'cv2.line', 'cv.line', (['image', 'landmark_point[1]', 'landmark_point[2]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[1], landmark_point[2], (0, 255, 0), 2)\n', (10504, 10565), True, 'import cv2 as cv\n'), ((10574, 10642), 'cv2.line', 'cv.line', (['image', 
'landmark_point[2]', 'landmark_point[5]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[2], landmark_point[5], (0, 255, 0), 2)\n', (10581, 10642), True, 'import cv2 as cv\n'), ((10651, 10719), 'cv2.line', 'cv.line', (['image', 'landmark_point[5]', 'landmark_point[9]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[5], landmark_point[9], (0, 255, 0), 2)\n', (10658, 10719), True, 'import cv2 as cv\n'), ((10728, 10797), 'cv2.line', 'cv.line', (['image', 'landmark_point[9]', 'landmark_point[13]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[9], landmark_point[13], (0, 255, 0), 2)\n', (10735, 10797), True, 'import cv2 as cv\n'), ((10806, 10876), 'cv2.line', 'cv.line', (['image', 'landmark_point[13]', 'landmark_point[17]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[13], landmark_point[17], (0, 255, 0), 2)\n', (10813, 10876), True, 'import cv2 as cv\n'), ((10885, 10954), 'cv2.line', 'cv.line', (['image', 'landmark_point[17]', 'landmark_point[0]', '(0, 255, 0)', '(2)'], {}), '(image, landmark_point[17], landmark_point[0], (0, 255, 0), 2)\n', (10892, 10954), True, 'import cv2 as cv\n'), ((11101, 11147), 'cv2.circle', 'cv.circle', (['image', '(cx, cy)', '(12)', '(0, 255, 0)', '(2)'], {}), '(image, (cx, cy), 12, (0, 255, 0), 2)\n', (11110, 11147), True, 'import cv2 as cv\n'), ((11156, 11292), 'cv2.putText', 'cv.putText', (['image', 'handedness.classification[0].label[0]', '(cx - 6, cy + 6)', 'cv.FONT_HERSHEY_SIMPLEX', '(0.6)', '(0, 255, 0)', '(2)', 'cv.LINE_AA'], {}), '(image, handedness.classification[0].label[0], (cx - 6, cy + 6),\n cv.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2, cv.LINE_AA)\n', (11166, 11292), True, 'import cv2 as cv\n'), ((11456, 11535), 'cv2.rectangle', 'cv.rectangle', (['image', '(brect[0], brect[1])', '(brect[2], brect[3])', '(0, 255, 0)', '(2)'], {}), '(image, (brect[0], brect[1]), (brect[2], brect[3]), (0, 255, 0), 2)\n', (11468, 11535), True, 'import cv2 as cv\n'), ((4620, 4654), 'numpy.array', 'np.array', (['(landmark_x, 
landmark_y)'], {}), '((landmark_x, landmark_y))\n', (4628, 4654), True, 'import numpy as np\n'), ((4712, 4757), 'numpy.append', 'np.append', (['palm_array', 'landmark_point'], {'axis': '(0)'}), '(palm_array, landmark_point, axis=0)\n', (4721, 4757), True, 'import numpy as np\n'), ((4813, 4858), 'numpy.append', 'np.append', (['palm_array', 'landmark_point'], {'axis': '(0)'}), '(palm_array, landmark_point, axis=0)\n', (4822, 4858), True, 'import numpy as np\n'), ((4916, 4961), 'numpy.append', 'np.append', (['palm_array', 'landmark_point'], {'axis': '(0)'}), '(palm_array, landmark_point, axis=0)\n', (4925, 4961), True, 'import numpy as np\n'), ((5019, 5064), 'numpy.append', 'np.append', (['palm_array', 'landmark_point'], {'axis': '(0)'}), '(palm_array, landmark_point, axis=0)\n', (5028, 5064), True, 'import numpy as np\n'), ((5124, 5169), 'numpy.append', 'np.append', (['palm_array', 'landmark_point'], {'axis': '(0)'}), '(palm_array, landmark_point, axis=0)\n', (5133, 5169), True, 'import numpy as np\n'), ((5228, 5273), 'numpy.append', 'np.append', (['palm_array', 'landmark_point'], {'axis': '(0)'}), '(palm_array, landmark_point, axis=0)\n', (5237, 5273), True, 'import numpy as np\n'), ((5821, 5855), 'numpy.array', 'np.array', (['(landmark_x, landmark_y)'], {}), '((landmark_x, landmark_y))\n', (5829, 5855), True, 'import numpy as np\n'), ((6598, 6659), 'cv2.circle', 'cv.circle', (['image', '(landmark_x, landmark_y)', '(5)', '(0, 255, 0)', '(2)'], {}), '(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)\n', (6607, 6659), True, 'import cv2 as cv\n'), ((6702, 6763), 'cv2.circle', 'cv.circle', (['image', '(landmark_x, landmark_y)', '(5)', '(0, 255, 0)', '(2)'], {}), '(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)\n', (6711, 6763), True, 'import cv2 as cv\n'), ((6808, 6869), 'cv2.circle', 'cv.circle', (['image', '(landmark_x, landmark_y)', '(5)', '(0, 255, 0)', '(2)'], {}), '(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)\n', (6817, 6869), True, 'import cv2 as 
cv\n'), ((6916, 6977), 'cv2.circle', 'cv.circle', (['image', '(landmark_x, landmark_y)', '(5)', '(0, 255, 0)', '(2)'], {}), '(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)\n', (6925, 6977), True, 'import cv2 as cv\n'), ((7022, 7083), 'cv2.circle', 'cv.circle', (['image', '(landmark_x, landmark_y)', '(5)', '(0, 255, 0)', '(2)'], {}), '(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)\n', (7031, 7083), True, 'import cv2 as cv\n'), ((7096, 7158), 'cv2.circle', 'cv.circle', (['image', '(landmark_x, landmark_y)', '(12)', '(0, 255, 0)', '(2)'], {}), '(image, (landmark_x, landmark_y), 12, (0, 255, 0), 2)\n', (7105, 7158), True, 'import cv2 as cv\n'), ((7205, 7266), 'cv2.circle', 'cv.circle', (['image', '(landmark_x, landmark_y)', '(5)', '(0, 255, 0)', '(2)'], {}), '(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)\n', (7214, 7266), True, 'import cv2 as cv\n'), ((7314, 7375), 'cv2.circle', 'cv.circle', (['image', '(landmark_x, landmark_y)', '(5)', '(0, 255, 0)', '(2)'], {}), '(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)\n', (7323, 7375), True, 'import cv2 as cv\n'), ((7423, 7484), 'cv2.circle', 'cv.circle', (['image', '(landmark_x, landmark_y)', '(5)', '(0, 255, 0)', '(2)'], {}), '(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)\n', (7432, 7484), True, 'import cv2 as cv\n'), ((7530, 7591), 'cv2.circle', 'cv.circle', (['image', '(landmark_x, landmark_y)', '(5)', '(0, 255, 0)', '(2)'], {}), '(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)\n', (7539, 7591), True, 'import cv2 as cv\n'), ((7604, 7666), 'cv2.circle', 'cv.circle', (['image', '(landmark_x, landmark_y)', '(12)', '(0, 255, 0)', '(2)'], {}), '(image, (landmark_x, landmark_y), 12, (0, 255, 0), 2)\n', (7613, 7666), True, 'import cv2 as cv\n'), ((7712, 7773), 'cv2.circle', 'cv.circle', (['image', '(landmark_x, landmark_y)', '(5)', '(0, 255, 0)', '(2)'], {}), '(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)\n', (7721, 7773), True, 'import cv2 as cv\n'), ((7821, 7882), 'cv2.circle', 
'cv.circle', (['image', '(landmark_x, landmark_y)', '(5)', '(0, 255, 0)', '(2)'], {}), '(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)\n', (7830, 7882), True, 'import cv2 as cv\n'), ((7930, 7991), 'cv2.circle', 'cv.circle', (['image', '(landmark_x, landmark_y)', '(5)', '(0, 255, 0)', '(2)'], {}), '(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)\n', (7939, 7991), True, 'import cv2 as cv\n'), ((8037, 8098), 'cv2.circle', 'cv.circle', (['image', '(landmark_x, landmark_y)', '(5)', '(0, 255, 0)', '(2)'], {}), '(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)\n', (8046, 8098), True, 'import cv2 as cv\n'), ((8111, 8173), 'cv2.circle', 'cv.circle', (['image', '(landmark_x, landmark_y)', '(12)', '(0, 255, 0)', '(2)'], {}), '(image, (landmark_x, landmark_y), 12, (0, 255, 0), 2)\n', (8120, 8173), True, 'import cv2 as cv\n'), ((8220, 8281), 'cv2.circle', 'cv.circle', (['image', '(landmark_x, landmark_y)', '(5)', '(0, 255, 0)', '(2)'], {}), '(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)\n', (8229, 8281), True, 'import cv2 as cv\n'), ((8329, 8390), 'cv2.circle', 'cv.circle', (['image', '(landmark_x, landmark_y)', '(5)', '(0, 255, 0)', '(2)'], {}), '(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)\n', (8338, 8390), True, 'import cv2 as cv\n'), ((8438, 8499), 'cv2.circle', 'cv.circle', (['image', '(landmark_x, landmark_y)', '(5)', '(0, 255, 0)', '(2)'], {}), '(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)\n', (8447, 8499), True, 'import cv2 as cv\n'), ((8545, 8606), 'cv2.circle', 'cv.circle', (['image', '(landmark_x, landmark_y)', '(5)', '(0, 255, 0)', '(2)'], {}), '(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)\n', (8554, 8606), True, 'import cv2 as cv\n'), ((8619, 8681), 'cv2.circle', 'cv.circle', (['image', '(landmark_x, landmark_y)', '(12)', '(0, 255, 0)', '(2)'], {}), '(image, (landmark_x, landmark_y), 12, (0, 255, 0), 2)\n', (8628, 8681), True, 'import cv2 as cv\n'), ((8728, 8789), 'cv2.circle', 'cv.circle', (['image', '(landmark_x, 
landmark_y)', '(5)', '(0, 255, 0)', '(2)'], {}), '(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)\n', (8737, 8789), True, 'import cv2 as cv\n'), ((8837, 8898), 'cv2.circle', 'cv.circle', (['image', '(landmark_x, landmark_y)', '(5)', '(0, 255, 0)', '(2)'], {}), '(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)\n', (8846, 8898), True, 'import cv2 as cv\n'), ((8946, 9007), 'cv2.circle', 'cv.circle', (['image', '(landmark_x, landmark_y)', '(5)', '(0, 255, 0)', '(2)'], {}), '(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)\n', (8955, 9007), True, 'import cv2 as cv\n'), ((9053, 9114), 'cv2.circle', 'cv.circle', (['image', '(landmark_x, landmark_y)', '(5)', '(0, 255, 0)', '(2)'], {}), '(image, (landmark_x, landmark_y), 5, (0, 255, 0), 2)\n', (9062, 9114), True, 'import cv2 as cv\n'), ((9127, 9189), 'cv2.circle', 'cv.circle', (['image', '(landmark_x, landmark_y)', '(12)', '(0, 255, 0)', '(2)'], {}), '(image, (landmark_x, landmark_y), 12, (0, 255, 0), 2)\n', (9136, 9189), True, 'import cv2 as cv\n'), ((3924, 3939), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (3934, 3939), False, 'import time\n'), ((4046, 4061), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (4056, 4061), False, 'import time\n')] |
# coding: utf-8
import re
import numpy
class toolkit:
    """Helpers for reading the decision-tree configuration and data files."""

    def readConf(self, filename='./decisionTree.conf'):
        """Parse the configuration file and return a dict of settings.

        The file is expected to contain ``key=value`` lines for
        ``trainset_name``, ``testset_name`` (optional), ``feature_discrete``,
        ``treeType``, ``pruning`` and ``save_name``.

        Returns a dict with keys: 'trainset', 'testset' (empty string when
        absent), 'feature_discrete' (dict), 'treeType', 'pruning' (bool),
        'save_name' and 'A' (feature name -> [column index]).
        """
        with open(filename, 'r') as conf_file:
            raw = conf_file.read()

        def grab(field):
            # All raw values found on "<field>=<value>" lines.
            return re.compile(field + r'=(.*)\n').findall(raw)

        conf = {}
        conf['trainset'] = grab('trainset_name')[0].strip()
        test_hits = grab('testset_name')
        conf['testset'] = test_hits[0].strip() if test_hits else ''
        # NOTE(review): eval of config text — assumes a trusted, local
        # configuration file (unchanged from original behavior).
        conf['feature_discrete'] = eval(
            '{' + grab('feature_discrete')[0].strip() + '}')
        conf['treeType'] = grab('treeType')[0].strip()
        conf['pruning'] = eval(grab('pruning')[0].strip())
        conf['save_name'] = grab('save_name')[0].strip()
        # Map each discrete feature name to its column index (as a list).
        conf['A'] = {key: [idx]
                     for idx, key in enumerate(conf['feature_discrete'])}
        return conf

    def genfromtxt(self, filename):
        """Load *filename* as an array of strings via numpy.genfromtxt."""
        return numpy.genfromtxt(filename, dtype=str)
| [
"numpy.genfromtxt",
"re.compile"
] | [((198, 233), 're.compile', 're.compile', (['"""trainset_name=(.*)\\\\n"""'], {}), "('trainset_name=(.*)\\\\n')\n", (208, 233), False, 'import re\n'), ((254, 288), 're.compile', 're.compile', (['"""testset_name=(.*)\\\\n"""'], {}), "('testset_name=(.*)\\\\n')\n", (264, 288), False, 'import re\n'), ((318, 356), 're.compile', 're.compile', (['"""feature_discrete=(.*)\\\\n"""'], {}), "('feature_discrete=(.*)\\\\n')\n", (328, 356), False, 'import re\n'), ((378, 408), 're.compile', 're.compile', (['"""treeType=(.*)\\\\n"""'], {}), "('treeType=(.*)\\\\n')\n", (388, 408), False, 'import re\n'), ((429, 458), 're.compile', 're.compile', (['"""pruning=(.*)\\\\n"""'], {}), "('pruning=(.*)\\\\n')\n", (439, 458), False, 'import re\n'), ((481, 512), 're.compile', 're.compile', (['"""save_name=(.*)\\\\n"""'], {}), "('save_name=(.*)\\\\n')\n", (491, 512), False, 'import re\n'), ((1145, 1182), 'numpy.genfromtxt', 'numpy.genfromtxt', (['filename'], {'dtype': 'str'}), '(filename, dtype=str)\n', (1161, 1182), False, 'import numpy\n')] |
import os
import ast
import numpy as np
from uti import webBrowser
from abaqusGui import *
import gui_commands
import gui_plot
from desicos import __version__ as version
import desicos.conecylDB as conecylDB
from desicos.conecylDB import fetch
from desicos.abaqus.utils import remove_special_characters as rsc
from desicos.abaqus.constants import *
NUM_PLIES = 40   # number of ply rows shown in the laminate input table
MAX_MODELS = 40  # per-imperfection-type cap stored in imp_maxModels
def cc_form2dict(db, form):
    """Collect the cone/cylinder parameters currently entered in *form*.

    Parameters
    ----------
    db : TestDB
        The dialog instance (not used here; kept for a uniform signature).
    form : AFXForm-like
        Object exposing ``*Kw`` keyword attributes holding the user input.

    Returns
    -------
    dict
        'laminapropKeys', 'plyts' and 'stack' (only when the laminate table
        has at least one filled cell) plus the scalar entries 'rbot', 'H',
        'alphadeg', 'elem_type', 'betadeg', 'omegadeg', 'numel_r' and
        'axial_displ'.
    """
    raw = form.laminateKw.getValueAsString()
    table = np.array(ast.literal_eval(raw))
    cc = {}
    # Harvest the laminate columns only when at least one cell is non-empty.
    if ''.join(table.flatten()):
        cc['laminapropKeys'] = [v for v in table[:, 0] if v != '']
        cc['plyts'] = [float(v) for v in table[:, 1] if v != '']
        cc['stack'] = [float(v) for v in table[:, 2] if v != '']
    for name in ('rbot', 'H', 'alphadeg', 'elem_type', 'betadeg',
                 'omegadeg', 'numel_r', 'axial_displ'):
        cc[name] = getattr(form, name + 'Kw').getValue()
    return cc
def cc_dict2form(ccname, cc, db, form):
    """Push the cone/cylinder parameters of dict *cc* into the GUI *form*.

    Clears the laminate and perturbation-load tables, writes the geometry,
    laminate and perturbation-load entries from *cc*, and finally refreshes
    the whole dialog via ``db.update_database``.

    Parameters:
        ccname (str): database key of the loaded conecyl (echoed into the
            'last loaded' field).
        cc (dict): conecyl parameters ('rbot', 'H', optional 'alphadeg',
            'stack', 'plyts', 'laminapropKeys', 'ploads', ...).
        db (TestDB): dialog holding the table widgets.
        form: AFXForm-like object exposing the ``*Kw`` keyword attributes.
    """
    # clearing laminateKw
    maxRow = db.laminateTable.getNumRows()
    db.laminateTable.clearContents(1, 1, maxRow-1, 1, False)
    #
    form.rbotKw.setValue(cc['rbot'])
    form.HKw.setValue(cc['H'])
    form.alphadegKw.setValue(cc.get('alphadeg', 0.))
    # Older entries may store a single 'laminapropKey'; normalize to a list.
    laminapropKeys = cc.get('laminapropKeys', [cc.get('laminapropKey')])
    if isinstance(laminapropKeys, str):
        laminapropKeys = [laminapropKeys]
    plyts = cc.get('plyts', [cc.get('plyt')])
    stack = cc.get('stack',[])
    # Build a NUM_PLIES x 3 table (material, thickness, angle) as byte strings.
    tmp = np.empty((NUM_PLIES, 3), dtype='|S50')
    #TODO necessary strange solution to force update
    # Fill with a placeholder, set the keyword once with it, then set again
    # with the placeholder stripped — the double setValues forces the AFX
    # keyword to register a change and refresh the table widget.
    tmp.fill('TODOTODOTODO')
    tmp[:len(laminapropKeys), 0] = laminapropKeys
    tmp[:len(plyts), 1] = plyts
    tmp[:len(stack), 2] = stack
    laminate = ','.join([str(tuple(i)) for i in tmp])
    form.laminateKw.setValues(laminate)
    laminate = laminate.replace('TODOTODOTODO', '')
    form.laminateKw.setValues(laminate)
    # clearing pl_tableKw
    maxRow = db.imp_tables['pl'].getNumRows()
    valuesStr = ''
    for i in range(1, maxRow):
        valuesStr += ', '
        valuesStr += ''
    db.imp_tables['pl'].clearContents(1, 1, maxRow-1, 1, False)
    form.pl_tableKw.setValues(valuesStr)
    # setting new pl_tableKw
    if 'ploads' in cc.keys():
        # First row: theta=0.0, pt=0.5, blank column, then one perturbation
        # load value (1 decimal place) per entry in cc['ploads'].
        valuesStr = '0.0,0.5,,'
        for i in range(len(cc['ploads'])):
            pload = cc['ploads'][i]
            valuesStr += '{0:2.1f},'.format(pload)
        valuesStr += 'end'
        # The 'end' sentinel lets us strip the trailing comma in one replace.
        valuesStr = valuesStr.replace(',end', '')
        form.pl_tableKw.setValues(valuesStr)
        form.pl_numKw.setValue(1)
    else:
        valuesStr = '0.0,0.5,,0.0'
        form.pl_tableKw.setValues(valuesStr)
    form.ccKeyKw.setValue('conecyl loaded!')
    form.last_loadedKw.setValue(ccname)
    input_dict = cc
    form.setDefault(update_values=True, input_dict = cc)
    db.update_database(update_all=True)
def message(string):
    """Send a kernel command that prints *string* (presumably shown in the
    Abaqus message area — confirm against sendCommand docs)."""
    # The r'' prefix in the generated command keeps backslashes in *string*
    # from being interpreted as escapes by the kernel.
    cmd = "print(r'{0}')".format(string)
    sendCommand(cmd)
###########################################################################
# Class definition
###########################################################################
class TestDB(AFXDataDialog):
"""
"""
def __init__(self, form):
#
# Init
#
self.form = form
self.form.db = self
#
self.logcount = 10000
self.lamMatrix = {'A':None, 'B':None, 'D':None}
self.model_cbs = []
#
#
#
title = 'DESICOS GUI Version {0}'.format(version)
AFXDataDialog.__init__(self, form, title, 0)
self.appendActionButton('Create Study', self, self.ID_CLICKED_APPLY)
self.appendActionButton('Apply defaults', self, self.ID_CLICKED_DEFAULTS)
self.appendActionButton('Close', self, self.ID_CLICKED_CANCEL)
#
# Main Vertical Frame
#
mainVF = FXVerticalFrame(self, LAYOUT_FILL_Y, LAYOUT_FILL_X)
#
# Always visible widgets
#
mainHF = FXHorizontalFrame(mainVF, LAYOUT_CENTER_Y)
AFXTextField(mainHF, 30, 'Study name (without spaces):', form.std_nameKw,
opts=LAYOUT_LEFT)
mainHF = FXHorizontalFrame(mainVF, LAYOUT_CENTER_Y)
tmp = AFXTextField(mainHF, 30, 'Last loaded conecyl:',
form.last_loadedKw)
tmp.setEditable(False)
self.ccs_CB = AFXComboBox(mainHF, 0, 10, 'Select from database:', form.ccKeyKw)
self.new_cc_name = AFXTextField(mainHF, 20, 'New:', form.new_cc_nameKw)
self.save_cc_button = FXButton(mainHF, 'Save')
self.del_cc_button = FXButton(mainHF, 'Delete')
#
#
# Tabs
#
mainTabBook = FXTabBook(mainVF, None, 0, LAYOUT_FILL_X)
#
# Tabs / Load / Save Study
#
FXTabItem(mainTabBook, 'Load / Save Study')
loadFrame = FXHorizontalFrame(mainTabBook, FRAME_RAISED|FRAME_SUNKEN)
loadVF = FXVerticalFrame(loadFrame, LAYOUT_FILL_X|LAYOUT_FILL_Y)
FXLabel(loadVF, '')
FXLabel(loadVF, '')
self.std_to_load = AFXComboBox(loadVF, 0, 10, 'Select study to load:',
form.std_to_postKw)
FXLabel(loadVF, '')
FXLabel(loadVF, '')
loadHF1 = FXHorizontalFrame(loadVF)
self.load_std = FXButton(loadHF1, 'Load Study')
self.save_std = FXButton(loadHF1, 'Save Study')
FXLabel(loadVF, '')
FXLabel(loadVF, '')
FXLabel(loadVF, 'NOTE: If you updated you Abaqus version, please open the .cae file in Abaqus before loading with the Plug-In')
FXLabel(loadVF, '')
FXLabel(loadVF, '')
FXHorizontalSeparator(loadVF)
#
# Tabs / Geometry
#
FXTabItem(mainTabBook, 'Geometry')
#
geomFrame = FXHorizontalFrame(mainTabBook)
geomFrame1=FXGroupBox(geomFrame, 'Shell Geometry', FRAME_GROOVE )
pngpath = os.path.join(DAHOME, 'gui', 'icons', 'geometry.png')
icon = afxCreatePNGIcon(pngpath)
FXLabel(geomFrame1, '', icon)
geomVF = FXVerticalFrame(geomFrame1)
geomVA = AFXVerticalAligner(geomVF)
FXLabel(geomVA, 'Define geometry:')
self.Rbot = AFXTextField(geomVA, 8, 'R:', form.rbotKw, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(geomVA, 8, 'H:', form.HKw, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(geomVA, 8, 'alpha in degrees:',
form.alphadegKw, opts=AFXTEXTFIELD_FLOAT)
FXLabel(geomVF, 'OBS:')
FXLabel(geomVF, ' - For cylinders keep alpha = 0')
FXLabel(geomVF, ' - H includes the resin rings')
#
# Tabs / Model
#
FXTabItem(mainTabBook, 'Model')
modelFrame = FXHorizontalFrame(mainTabBook, FRAME_RAISED|FRAME_SUNKEN)
modelBook = FXTabBook(modelFrame, None, 0,
TABBOOK_LEFTTABS|LAYOUT_FILL_X)
#
# Tabs / Model / Material
#
FXTabItem(modelBook, 'Material', None, TAB_LEFT)
matFrame = FXVerticalFrame(modelBook, LAYOUT_FILL_X|LAYOUT_FILL_Y)
FXLabel(matFrame, 'Lamina / isotropic elastic properties')
matHF = FXHorizontalFrame(matFrame)
self.laminaprops_CB = AFXComboBox(matHF, 0, 5, 'Select from database:', form.laminapropKeyKw)
self.new_laminaprop_name = AFXTextField(matHF, 30, 'New:', form.new_laminaprop_nameKw)
self.new_laminaprop_name.disable()
self.save_laminaprop_button = FXButton(matHF, 'Save')
self.del_laminaprop_button = FXButton(matHF, 'Delete')
matHF1 = FXHorizontalFrame(matFrame)
AFXTextField(matHF1, 8, 'E11' , form.laminapropKw, 1, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(matHF1, 8, 'E22' , form.laminapropKw, 2, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(matHF1, 8, 'nu12', form.laminapropKw, 3, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(matHF1, 8, 'G12' , form.laminapropKw, 4, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(matHF1, 8, 'G13' , form.laminapropKw, 5, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(matHF1, 8, 'G23' , form.laminapropKw, 6, opts=AFXTEXTFIELD_FLOAT)
FXLabel(matFrame, '')
FXLabel(matFrame, 'For isotropic, define E22=E11 and let G12, G13 and G23 as blank fields')
FXLabel(matFrame, '')
FXHorizontalSeparator(matFrame)
FXLabel(matFrame, 'Material allowables (composite only...)')
matHF = FXHorizontalFrame(matFrame)
self.allowables_CB = AFXComboBox(matHF, 0, 5, 'Select from database:', form.allowablesKeyKw)
self.new_allowables_name = AFXTextField(matHF, 30, 'New:', form.new_allowables_nameKw)
self.new_allowables_name.disable()
self.save_allowables_button = FXButton(matHF, 'Save')
self.del_allowables_button = FXButton(matHF, 'Delete')
matHF2 = FXHorizontalFrame(matFrame)
AFXTextField(matHF2, 8, 'S11t', form.allowablesKw, 1, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(matHF2, 8, 'S11c', form.allowablesKw, 2, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(matHF2, 8, 'S22t', form.allowablesKw, 3, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(matHF2, 8, 'S22c', form.allowablesKw, 4, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(matHF2, 8, 'S12' , form.allowablesKw, 5, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(matHF2, 8, 'S13' , form.allowablesKw, 6, opts=AFXTEXTFIELD_FLOAT)
#
# Tabs / Model / Laminate
#
FXTabItem(modelBook, 'Laminate', None, TAB_LEFT)
lamFrame = FXHorizontalFrame(modelBook, FRAME_RAISED|FRAME_SUNKEN)
lamVF = FXVerticalFrame(lamFrame)
lamRadioGB = FXGroupBox(lamVF, 'Define laminate using', FRAME_GROOVE)
FXLabel(lamVF, 'ply 01 material and thickness will be\n'+\
'used for other plies if they are not \n'+\
'especified individually')
sw = FXSwitcher(lamFrame)
self.lamRB = {}
self.lamRB['stack'] = FXRadioButton(lamRadioGB, 'Stacking sequence', sw,
FXSwitcher.ID_OPEN_FIRST)
#TODO
#self.lamRB['ABD' ] = FXRadioButton(lamRadioGB, 'A, B, D matrices', sw,
# FXSwitcher.ID_OPEN_FIRST+1)
# Tabs / Model / Laminate / stack
laminateTable = AFXTable(sw, 21, 4, NUM_PLIES+1, 4,
form.laminateKw, 0,
opts=AFXTABLE_EDITABLE|AFXTABLE_TYPE_FLOAT|AFXTABLE_STYLE_DEFAULT)
laminateTable.setLeadingRows(1)
laminateTable.setLeadingColumns(1)
laminateTable.setLeadingColumnLabels(
'\t'.join(['ply {0:02d}'.format(i) for i in range(1, NUM_PLIES+1)]))
laminateTable.setColumnWidth(1, 300)
laminateTable.setColumnWidth(2, 75)
laminateTable.setColumnWidth(3, 75)
laminateTable.setLeadingRowLabels('material\tthickness\tangle')
laminateTable.setColumnType(1, AFXTable.LIST)
self.laminateTable = laminateTable
# Tabs / Model / Laminate / ABD
#TODO activate direct input of ABD matrix
if False:
lamMatrix = FXMatrix(sw, 2, opts=MATRIX_BY_COLUMNS)
for lam in self.lamMatrix.keys():
table = AFXTable(lamMatrix, 5, 4, 5, 4,
opts=AFXTABLE_EDITABLE|AFXTABLE_STYLE_DEFAULT)
table.setLeadingRows(2)
table.setItemSpan(0, 0, 2, 1)
table.setItemSpan(0, 1, 1, 3)
table.setLeadingColumns(1)
table.setLeadingRowLabels(lam + ' matrix', 0)
table.setLeadingRowLabels('1\t2\t3', 1)
table.showHorizontalGrid(True)
table.showVerticalGrid(True)
self.lamMatrix[ lam ] = table
#
# Tabs / Model / Mesh
#
FXTabItem(modelBook, 'Mesh', None, TAB_LEFT)
meshFrame = FXHorizontalFrame(modelBook, FRAME_RAISED|FRAME_SUNKEN)
meshCB = AFXComboBox(meshFrame, 0, 4, 'Element Type:', form.elem_typeKw)
meshCB.appendItem('S4R' , 1)
meshCB.appendItem('S4R5', 1)
meshCB.appendItem('S8R' , 1)
meshCB.appendItem('S8R5', 1)
self.meshCB = meshCB
meshVA = AFXVerticalAligner(meshFrame)
self.numel_r = AFXTextField(meshVA, 5, 'Number of elements around the circumference:',
form.numel_rKw, opts=AFXTEXTFIELD_INTEGER)
text = 'Define in Geometric Imperfections/Cutouts'
numel_cutout = AFXTextField(meshVA, len(text),
'Number of elements around cutouts:')
numel_cutout.setText(text)
numel_cutout.setEditable(False)
#
# Tabs / Model / Boundary Conditions
#
FXTabItem(modelBook, 'Boundary Conditions', None, TAB_LEFT)
bcFrame = FXHorizontalFrame(modelBook, FRAME_RAISED|FRAME_SUNKEN)
bcHA = FXHorizontalFrame(bcFrame)
bcVAfig = FXVerticalFrame(bcHA)
FXVerticalSeparator(bcHA)
bcVAbot = FXVerticalFrame(bcHA)
FXVerticalSeparator(bcHA)
bcVAtop = FXVerticalFrame(bcHA)
bcVAfig_VA = AFXVerticalAligner(bcVAfig)
AFXTextField(bcVAfig_VA, 8, 'Resin Elastic Modulus:' , form.resin_EKw, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(bcVAfig_VA, 8, 'Resin Poisson`s ratio:', form.resin_nuKw, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(bcVAfig_VA, 8, 'Elements along the resin edge:', form.resin_numelKw, opts=AFXTEXTFIELD_INTEGER)
FXCheckButton(bcVAfig_VA, 'Use DLR Boundary Conditions', form.use_DLR_bcKw)
FXHorizontalSeparator(bcVAfig)
# bottom edge
FXLabel(bcVAbot, 'Bottom Edge')
FXLabel(bcVAbot, '')
FXLabel(bcVAbot, '')
FXCheckButton(bcVAbot, 'Fix Radial displ. of shell edge / resin bottom' , form.bc_fix_bottom_uRKw)
FXCheckButton(bcVAbot, 'Fix Circumferential displ. of shell edge / resin bottom' , form.bc_fix_bottom_vKw)
FXCheckButton(bcVAbot, 'Clamp shell edge' , form.bc_bottom_clampedKw)
FXLabel(bcVAbot, '')
FXLabel(bcVAbot, '')
self.resin_add_BIR = FXCheckButton(bcVAbot, 'Inner Resin Ring Bottom', form.resin_add_BIRKw)
self.resin_add_BOR = FXCheckButton(bcVAbot, 'Outer Resin Ring Bottom', form.resin_add_BORKw)
FXLabel(bcVAbot, '')
FXLabel(bcVAbot, '')
self.bc_fix_bottom_side_uR = FXCheckButton(bcVAbot, 'Fix Radial displ. of resin sides' , form.bc_fix_bottom_side_uRKw)
self.bc_fix_bottom_side_v = FXCheckButton(bcVAbot, 'Fix Circumferential displ. of resin sides' , form.bc_fix_bottom_side_vKw)
self.bc_fix_bottom_side_u3 = FXCheckButton(bcVAbot, 'Fix Radial displ. of resin sides' , form.bc_fix_bottom_side_u3Kw)
FXLabel(bcVAbot, '')
FXLabel(bcVAbot, '')
bcVAbot_VA = AFXVerticalAligner(bcVAbot)
AFXTextField(bcVAbot_VA, 5, 'resin_bot_h:' , form.resin_bot_hKw, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(bcVAbot_VA, 5, 'resin_bir_w1:', form.resin_bir_w1Kw, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(bcVAbot_VA, 5, 'resin_bir_w2:', form.resin_bir_w2Kw, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(bcVAbot_VA, 5, 'resin_bor_w1:', form.resin_bor_w1Kw, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(bcVAbot_VA, 5, 'resin_bor_w2:', form.resin_bor_w2Kw, opts=AFXTEXTFIELD_FLOAT)
# top edge
FXLabel(bcVAtop, 'Top Edge')
FXLabel(bcVAtop, '')
FXLabel(bcVAtop, '')
FXCheckButton(bcVAtop, 'Fix Radial displ. of shell edge / resin top' , form.bc_fix_top_uRKw)
FXCheckButton(bcVAtop, 'Fix Circumferential displ. of shell edge / resin top' , form.bc_fix_top_vKw)
FXCheckButton(bcVAtop, 'Clamp shell edge' , form.bc_top_clampedKw)
FXLabel(bcVAtop, '')
FXLabel(bcVAtop, '')
self.resin_add_TIR = FXCheckButton(bcVAtop, 'Inner Resin Ring Top' , form.resin_add_TIRKw)
self.resin_add_TOR = FXCheckButton(bcVAtop, 'Outer Resin Ring Top' , form.resin_add_TORKw)
FXLabel(bcVAtop, '')
FXLabel(bcVAtop, '')
self.bc_fix_top_side_uR = FXCheckButton(bcVAtop, 'Fix Radial displ. of resin sides' , form.bc_fix_top_side_uRKw)
self.bc_fix_top_side_v = FXCheckButton(bcVAtop, 'Fix Circumferential displ. of resin sides' , form.bc_fix_top_side_vKw)
self.bc_fix_top_side_u3 = FXCheckButton(bcVAtop, 'Fix Radial displ. of resin sides' , form.bc_fix_top_side_u3Kw)
FXLabel(bcVAtop, '')
FXLabel(bcVAtop, '')
bcVAtop_VA = AFXVerticalAligner(bcVAtop)
AFXTextField(bcVAtop_VA, 5, 'resin_top_h:' , form.resin_top_hKw, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(bcVAtop_VA, 5, 'resin_tir_w1:', form.resin_tir_w1Kw, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(bcVAtop_VA, 5, 'resin_tir_w2:', form.resin_tir_w2Kw, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(bcVAtop_VA, 5, 'resin_tor_w1:', form.resin_tor_w1Kw, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(bcVAtop_VA, 5, 'resin_tor_w2:', form.resin_tor_w2Kw, opts=AFXTEXTFIELD_FLOAT)
pngpath = os.path.join(DAHOME, 'gui', 'icons', 'resin_rings.png')
icon = afxCreatePNGIcon(pngpath)
FXLabel(bcVAfig, '')
FXLabel(bcVAfig, '')
FXLabel(bcVAfig, '', icon)
#
# Tabs / Model / Load Steps
#
FXTabItem(modelBook, 'Load Steps', None, TAB_LEFT)
nlHF = FXHorizontalFrame(modelBook, opts=FRAME_RAISED|FRAME_SUNKEN)
# general parameters
nlVFc= FXVerticalFrame(nlHF)
FXLabel(nlVFc, '')
FXLabel(nlVFc, 'Load Definitions')
FXLabel(nlVFc, '')
FXCheckButton(nlVFc, 'Displacement controlled', form.displ_controlledKw)
FXCheckButton(nlVFc, 'Use two load steps:', form.separate_load_stepsKw)
self.axial_displ = AFXTextField(nlVFc, 8, 'Axial displacement:', form.axial_displKw, opts=AFXTEXTFIELD_FLOAT)
self.axial_load = AFXTextField(nlVFc, 8, 'Axial compressive\nload:', form.axial_loadKw, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(nlVFc, 8, 'Pressure load:\n (positive outwards)', form.pressure_loadKw, opts=AFXTEXTFIELD_FLOAT)
FXHorizontalSeparator(nlVFc)
FXLabel(nlVFc, '')
FXLabel(nlVFc, 'Step Number for Each Load')
FXLabel(nlVFc, '')
self.axial_step = AFXTextField(nlVFc, 8, 'Axial loads:', form.axial_stepKw, opts=AFXTEXTFIELD_INTEGER)
self.pload_step = AFXTextField(nlVFc, 8, 'Perturbation loads:', form.pload_stepKw, opts=AFXTEXTFIELD_INTEGER)
self.pressure_step = AFXTextField(nlVFc, 8, 'Pressure load:', form.pressure_stepKw, opts=AFXTEXTFIELD_INTEGER)
# perturbation load step
FXVerticalSeparator(nlHF)
nlVF1 = FXVerticalFrame(nlHF)
FXLabel(nlVF1, '')
FXLabel(nlVF1, 'Step with constant loads (step 1)')
FXLabel(nlVF1, '')
self.art_damp1 = FXCheckButton(nlVF1, 'Artificial Damping', form.artificial_damping1Kw)
FXLabel(nlVF1, '')
self.damp_factor1 = AFXTextField(nlVF1, 8, 'Damping Factor:', form.damping_factor1Kw, opts=AFXTEXTFIELD_FLOAT)
nlVA = AFXVerticalAligner(nlVF1)
self.minInc1 = AFXTextField(nlVA, 8, 'Minimum increment size:', form.minInc1Kw, opts=AFXTEXTFIELD_FLOAT)
self.initialInc1 = AFXTextField(nlVA, 8, 'Initial increment size:', form.initialInc1Kw, opts=AFXTEXTFIELD_FLOAT)
self.maxInc1 = AFXTextField(nlVA, 8, 'Maximum increment size:', form.maxInc1Kw, opts=AFXTEXTFIELD_FLOAT)
self.maxNumInc1 = AFXTextField(nlVA, 8, 'Maximum number of increments:', form.maxNumInc1Kw, opts=AFXTEXTFIELD_FLOAT)
# axial compression step
FXVerticalSeparator(nlHF)
nlVF2 = FXVerticalFrame(nlHF)
pngpath = os.path.join(DAHOME, 'gui', 'icons', 'axial2.png')
icon = afxCreatePNGIcon(pngpath)
FXLabel(nlVF2, '')
FXLabel(nlVF2, 'Step with incremented loads (step 2)', icon)
FXLabel(nlVF2, '')
FXCheckButton(nlVF2, 'Artificial Damping', form.artificial_damping2Kw)
FXLabel(nlVF2, '')
AFXTextField(nlVF2, 8, 'Damping Factor:', form.damping_factor2Kw, opts=AFXTEXTFIELD_FLOAT)
nlVA = AFXVerticalAligner(nlVF2)
AFXTextField(nlVA, 8, 'Minimum increment size:', form.minInc2Kw, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(nlVA, 8, 'Initial increment size:', form.initialInc2Kw, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(nlVA, 8, 'Maximum increment size:', form.maxInc2Kw, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(nlVA, 8, 'Maximum number of increments:', form.maxNumInc2Kw, opts=AFXTEXTFIELD_FLOAT)
#
# Tabs / Model / Output Requests
#
FXTabItem(modelBook, 'Output Requests', None, TAB_LEFT)
outputFrame = FXVerticalFrame(modelBook, FRAME_RAISED|FRAME_SUNKEN)
FXLabel(outputFrame, 'Field Outputs')
AFXTextField(outputFrame, 8, 'Print at every time interval',
form.timeIntervalKw, opts=AFXTEXTFIELD_FLOAT)
FXCheckButton(outputFrame, 'Request stress outputs',
form.stress_outputKw)
#FXHorizontalSeparator(outputFrame)
#FXLabel(outputFrame, 'History Outputs')
#FXCheckButton(outputFrame, 'Load shortening curve').setCheck(True)
#FXCheckButton(outputFrame,
# 'Displacements at the PL points').setCheck(True)
#
# Tabs / Geometric Imperfections
#
FXTabItem(mainTabBook, 'Geometric Imperfections')
impFrame = FXHorizontalFrame(mainTabBook, FRAME_RAISED|FRAME_SUNKEN)
impBook = FXTabBook(impFrame, None, 0, TABBOOK_LEFTTABS|LAYOUT_FILL_X)
#
self.imp_current_num = {}
self.imp_tables = {}
self.imp_spinners = {}
self.imp_num_params = {}
self.imp_maxModels = {}
rowLabels = {}
rowLabels2 = {}
colWidths = {}
visibleCols = {}
labelTabs = {}
labelSpinners = {}
pngs = {}
imp_numKw = {}
self.imp_tableKw = {}
#
# Tabs / Geometric Imperfections /
# Perturbation Loads / Constant Amp. Perturbation Buckle / Dimples / Cutouts / Axisymmetrics / LMBIs
#
labelTabs['pl'] = 'Perturbation Loads'
labelTabs['cbi'] = 'Perturbation Buckle'
labelTabs['d'] = 'Dimples'
labelTabs['ax'] = 'Axisymmetric'
labelTabs['lbmi'] = 'Linear Buckling Modes'
labelTabs['cut'] = 'Cutouts'
colWidths['pl'] = 45
colWidths['cbi'] = 45
colWidths['d'] = 40
colWidths['ax'] = 50
colWidths['lbmi'] = 65
colWidths['cut'] = 60
visibleCols['pl'] = 5
visibleCols['cbi'] = 5
visibleCols['d'] = 6
visibleCols['ax'] = 5
visibleCols['lbmi'] = 4
visibleCols['cut'] = 5
labelSpinners['pl'] = 'Number of perturbation loads:'
labelSpinners['cbi'] = 'Number of perturbation buckles:'
labelSpinners['d'] = 'Number of single buckles'
labelSpinners['ax'] = 'Number of axisymmetrics'
labelSpinners['lbmi'] = 'Number of buckling modes to combine:'
labelSpinners['cut'] = 'Number of cutouts'
self.imp_current_num['pl'] = 32
self.imp_current_num['cbi'] = 32
self.imp_current_num['d'] = 16
self.imp_current_num['ax'] = 16
self.imp_current_num['lbmi'] = 16
self.imp_current_num['cut'] = 16
self.imp_maxModels['pl'] = MAX_MODELS
self.imp_maxModels['cbi'] = MAX_MODELS
self.imp_maxModels['d'] = MAX_MODELS
self.imp_maxModels['ax'] = MAX_MODELS
self.imp_maxModels['lbmi'] = MAX_MODELS
self.imp_maxModels['cut'] = MAX_MODELS
self.imp_num_params['pl'] = 2
self.imp_num_params['cbi'] = 2
self.imp_num_params['d'] = 4
self.imp_num_params['ax'] = 2
self.imp_num_params['lbmi'] = 1
self.imp_num_params['cut'] = 3
rowLabels['pl'] = 'Position theta:\tPosition z/H:\t'
rowLabels['cbi'] = 'Position theta:\tPosition z/H:\t'
rowLabels['d'] = 'Position theta:\tPosition z/H:\t' + \
'Parameter a:\tParameter b:\t'
rowLabels['ax'] = 'Position z/H:\tParameter b:\t'
rowLabels['lbmi'] = 'Mode number\t'
rowLabels['cut'] = 'Position theta:\tPosition z/H:' +\
'\tNr. radial elements\t'
rowLabels2['pl'] = '\tPL value for model'
rowLabels2['cbi'] = '\tPB value for model'
rowLabels2['d'] = '\tWb for model'
rowLabels2['ax'] = '\tWb for model'
rowLabels2['lbmi'] = '\tSF for model'
rowLabels2['cut'] = '\tcutout diameter for model'
pngs['pl'] = 'pl2.png'
pngs['cbi'] = 'cb.png'
pngs['d'] = 'd2.png'
pngs['ax'] = 'axisymmetric.png'
pngs['lbmi'] = 'lbmi2.png'
pngs['cut'] = 'cutout2.png'
self.imp_tableKw['pl'] = form.pl_tableKw
self.imp_tableKw['cbi'] = form.cb_tableKw
self.imp_tableKw['d'] = form.d_tableKw
self.imp_tableKw['ax'] = form.ax_tableKw
self.imp_tableKw['lbmi'] = form.lbmi_tableKw
self.imp_tableKw['cut'] = form.cut_tableKw
imp_numKw['pl'] = form.pl_numKw
imp_numKw['cbi'] = form.cb_numKw
imp_numKw['d'] = form.d_numKw
imp_numKw['ax'] = form.ax_numKw
imp_numKw['lbmi'] = form.lbmi_numKw
imp_numKw['cut'] = form.cut_numKw
#
for k in ['pl', 'cbi', 'd', 'ax', 'lbmi', 'cut']:
maxIMP = self.imp_current_num[k]
num_param = self.imp_num_params[k]
maxModels = self.imp_maxModels[k]
FXTabItem(impBook, labelTabs[k], None, TAB_LEFT)
impVF = FXVerticalFrame(impBook, LAYOUT_FILL_Y|FRAME_RAISED|FRAME_SUNKEN)
impHF = FXHorizontalFrame(impVF)
self.imp_spinners[k] = AFXSpinner(impHF, 2, labelSpinners[k], imp_numKw[k])
self.imp_spinners[k].setRange(0, maxIMP)
FXHorizontalSeparator(impVF)
impHF = FXHorizontalFrame(impVF)
self.imp_tables[k] = AFXTable(impHF, 20, visibleCols[k]+1,
maxModels+num_param+2, maxIMP+1,
self.imp_tableKw[k], 0,
opts=AFXTABLE_TYPE_FLOAT|AFXTABLE_STYLE_DEFAULT)
for i in range(self.imp_current_num[k]):
self.imp_tables[k].setColumnWidth(i+1, colWidths[k])
self.imp_tables[k].setLeadingRows(1)
self.imp_tables[k].setLeadingColumns(1)
self.imp_tables[k].showHorizontalGrid(True)
self.imp_tables[k].showVerticalGrid(True)
self.imp_tables[k].setGridColor(1)
colLabel = ''
for i in range(1, maxIMP+1):
colLabel += k.upper() + '{0:02d}\t'.format(i)
self.imp_tables[k].setColumnEditable(i, True)
self.imp_tables[k].setItemEditable(num_param + 1, i, False)
self.imp_tables[k].setColumnType(i,
self.imp_tables[k].FLOAT)
self.imp_tables[k].setLeadingRowLabels(colLabel)
rowLabel = rowLabels[k]
for i in range(1, maxModels+1):
rowLabel += rowLabels2[k] + ' {0:02d}'.format(i)
self.imp_tables[k].setLeadingColumnLabels(rowLabel)
pngpath = os.path.join(DAHOME, 'gui', 'icons', pngs[k])
icon = afxCreatePNGIcon(pngpath)
FXLabel(impHF, '', icon)
#
# Tabs / Geometric Imperfections / Ply Piece imperfections
#
self.current_num_plies = NUM_PLIES
FXTabItem(impBook, 'Ply Piece Imperfection', None, TAB_LEFT)
impVF = FXVerticalFrame(impBook, LAYOUT_FILL_Y|FRAME_RAISED|FRAME_SUNKEN)
impHF = FXHorizontalFrame(impVF, opts=LAYOUT_CENTER_Y)
impVF = FXVerticalFrame(impHF)
FXCheckButton(impVF, 'Enable Ply Piece Imperfection', form.ppi_enabledKw)
FXLabel(impVF, '')
FXHorizontalSeparator(impVF)
FXLabel(impVF, '')
pngpath = os.path.join(DAHOME, 'gui', 'icons', 'extra_height.png')
icon = afxCreatePNGIcon(pngpath)
FXLabel(impVF, '', icon)
AFXTextField(impVF, 8, 'Extra height along top / bottom edge:', form.ppi_extra_heightKw, opts=AFXTEXTFIELD_FLOAT)
FXLabel(impVF, '')
FXHorizontalSeparator(impVF)
FXLabel(impVF, '')
lbl = FXLabel(impVF, 'Visualization of ply pieces and fiber orientation')
lbl.setFont(getAFXFont(FONT_BOLD))
AFXNote(impVF, 'The plot is made for an existing cone model,\n' +
'that may have been created using parameters\n' +
'that differ from those shown in this window.')
plotVA = AFXVerticalAligner(impVF)
self.model_cbs.append(AFXComboBox(plotVA, 2, 10, 'Select model:',
form.plot_imp_modelKw))
self.plot_ply_index = AFXSpinner(plotVA, 2, 'Ply index:',
form.plot_ply_indexKw)
form.plot_ply_indexKw.setValue(1)
plot_type = AFXComboBox(plotVA, 2, 10,
"Plot type:",
form.plot_imp_typeKw)
for i in range(1, 7):
plot_type.appendItem('Plot type {0}'.format(i))
plot_type.setCurrentItem(0)
form.plot_imp_typeKw.setValue(plot_type.getItemText(0))
AFXNote(impVF, 'See Post-processing -> Opened contour plots\n' +
'for examples of plot types.')
self.plot_ppi_button = FXButton(impVF, 'Create plot')
FXVerticalSeparator(impHF)
impVF = FXVerticalFrame(impHF)
pngpath = os.path.join(DAHOME, 'gui', 'icons', 'ply_pieces.png')
icon = afxCreatePNGIcon(pngpath)
FXLabel(impVF, '', icon)
AFXNote(impVF, 'The number of editable table rows matches the stack ' +
'length set in the Model -> Laminate tab.\n' +
'The table is locked entirely if the imperfection is not enabled. ' +
'(top left checkbox)')
ppiTable = AFXTable(impVF, 10, 6, NUM_PLIES+1, 6,
form.ppi_tableKw, 0,
opts=AFXTABLE_EDITABLE|AFXTABLE_TYPE_FLOAT|AFXTABLE_STYLE_DEFAULT)
ppiTable.setLeadingRows(1)
ppiTable.setLeadingColumns(1)
ppiTable.setLeadingColumnLabels(
'\t'.join(['ply {0:02d}'.format(i) for i in range(1, NUM_PLIES+1)]))
ppiTable.setColumnWidth(-1, 120)
ppiTable.setColumnType(-1, AFXTable.FLOAT)
ppiTable.setColumnEditable(5, False)
ppiTable.shadeReadOnlyItems(True)
row_headings = ['Starting position\n(Required)',
'Angular offset (0..1)\n(Optional, default 0)',
'Maximum width\n(Required)',
'Eccentricity (0..1)\n(Optional, see **)',
"Orientation (\xb0)\n(from 'Laminate')"]
ppiTable.setLeadingRowLabels('\t'.join(row_headings))
self.ppiTable = ppiTable
FXLabel(impVF, "(**) Default value for 'Eccentricity' is 1.0 if " +
'orientation > 0, 0.0 if orientation < 0 and ' +
'0.5 if orientation = 0')
#
# Tabs / Geometric Imperfections / Fiber fraction Imperfections
#
FXTabItem(impBook, 'Fiber Fraction Imperfection', None, TAB_LEFT)
impVF = FXVerticalFrame(impBook, LAYOUT_FILL_Y|FRAME_RAISED|FRAME_SUNKEN)
impHF = FXHorizontalFrame(impVF, opts=LAYOUT_CENTER_Y)
impVF = FXVerticalFrame(impHF)
FXLabel(impVF, '')
FXLabel(impVF, 'The default values (0, Off) will not apply the imperfection',
opts=LAYOUT_CENTER_X)
self.imp_ffi_sf = AFXTable(impVF, 21, 3,(MAX_MODELS+1), 3, form.ffi_scalingsKw, 0,
opts=AFXTABLE_EDITABLE|AFXTABLE_TYPE_FLOAT|AFXTABLE_STYLE_DEFAULT)
self.imp_ffi_sf.setLeadingRows(1)
self.imp_ffi_sf.setLeadingColumns(1)
self.imp_ffi_sf.setLeadingRowLabels('global thickness\nscaling factor\tuse thickness\nimperfection data')
colLabel = '\t'.join(['model {0:02d}'.format(i) for i in range(1, MAX_MODELS+1)])
self.imp_ffi_sf.setLeadingColumnLabels(colLabel)
self.imp_ffi_sf.setColumnWidth(-1, 120)
self.imp_ffi_sf.setColumnType(2, AFXTable.BOOL)
FXVerticalSeparator(impHF)
impVF2 = FXVerticalFrame(impHF)
FXLabel(impVF2, '')
FXLabel(impVF2, 'Parameters:')
FXLabel(impVF2, '')
impVA = AFXVerticalAligner(impVF2)
AFXTextField(impVA, 8, 'Nominal fiber volume fraction:',
form.ffi_nominal_vfKw, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(impVA, 8, 'Matrix Elastic Modulus:',
form.ffi_E_matrixKw, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(impVA, 8, "Matrix Poisson's ratio:",
form.ffi_nu_matrixKw, opts=AFXTEXTFIELD_FLOAT)
FXLabel(impVF2, '')
FXHorizontalSeparator(impVF2)
FXLabel(impVF2, '')
pngpath = os.path.join(DAHOME, 'gui', 'icons', 'fiber_fraction.png')
icon = afxCreatePNGIcon(pngpath)
FXLabel(impVF2, '', icon)
#
# Tabs / Geometric Imperfections / Mid-Surface Imperfections
#
FXTabItem(impBook, 'Mid-Surface Imperfections', None, TAB_LEFT)
impVF = FXVerticalFrame(impBook, LAYOUT_FILL_Y|FRAME_RAISED|FRAME_SUNKEN)
impHF = FXHorizontalFrame(impVF, opts=LAYOUT_CENTER_Y)
impVF = FXVerticalFrame(impHF)
FXCheckButton(impVF, 'Use the "theta z imperfection" format', form.imp_ms_theta_z_formatKw)
FXLabel(impVF, '')
FXLabel(impVF, '')
self.imp_msi_db = AFXComboBox(impVF, 0, 15, 'Select from database:', form.imp_msKw)
reload(conecylDB)
if form.imp_ms_theta_z_formatKw.getValue():
imps = conecylDB.imps_theta_z
else:
imps = conecylDB.imps
keys = map(str, [k for k in imps.keys() if 'msi' in imps[k].keys()])
keys.sort()
self.imp_msi_db.appendItem('')
for k in keys:
self.imp_msi_db.appendItem(k)
FXCheckButton(impVF, 'Strech H_points to H_measured', form.imp_ms_stretch_HKw)
impVA = AFXVerticalAligner(impVF)
AFXTextField(impVA, 8, 'Radius tolerance to ignore dummy points (% of the radius):', form.imp_r_TOLKw, opts=AFXTEXTFIELD_FLOAT)
AFXTextField(impVA, 8, 'Number of closest points to use in the inverse weighted interpolation:', form.imp_ms_ncpKw, opts=AFXTEXTFIELD_INTEGER)
AFXTextField(impVA, 8, 'Power parameter to use in the inverse weighted interpolation:\n'+\
'(when increased, increases the influence of the closest points)', form.imp_ms_power_parameterKw, opts=AFXTEXTFIELD_FLOAT)
FXLabel(impHF, ' ')
impVF2 = FXVerticalFrame(impHF)
FXLabel(impVF2, 'scaling factor=0 will NOT\napply the imperfection', opts=LAYOUT_CENTER_X)
self.imp_ms_sf = AFXTable(impVF2, 21, 2,(MAX_MODELS+1), 2, form.imp_ms_scalingsKw, 0,
opts=AFXTABLE_EDITABLE|AFXTABLE_TYPE_FLOAT|AFXTABLE_STYLE_DEFAULT)
self.imp_ms_sf.setLeadingRows(1)
self.imp_ms_sf.setLeadingColumns(1)
self.imp_ms_sf.setLeadingRowLabels('scaling factor')
colLabel = '\t'.join(['model {0:02d}'.format(i) for i in range(1, MAX_MODELS+1)])
self.imp_ms_sf.setLeadingColumnLabels(colLabel)
FXLabel(impVF, '')
FXLabel(impVF, '')
self.apply_imp_ms = FXButton(impVF, 'Apply Mid-Surface Imperfections')
FXLabel(impVF, '')
FXHorizontalSeparator(impVF)
FXLabel(impVF, '')
lbl = FXLabel(impVF, 'Visualization of mid-surface imperfection')
lbl.setFont(getAFXFont(FONT_BOLD))
AFXNote(impVF, 'The plot is made for an existing cone model,' +
' that may have been created\nusing parameters' +
' that differ from those shown in this window.')
plotVA = AFXVerticalAligner(impVF)
self.model_cbs.append(AFXComboBox(plotVA, 2, 10, 'Select model:',
form.plot_imp_modelKw))
plot_type = AFXComboBox(plotVA, 2, 10,
"Plot type:",
form.plot_imp_typeKw)
for i in range(1, 7):
plot_type.appendItem('Plot type {0}'.format(i))
plot_type.setCurrentItem(0)
AFXNote(impVF, 'See Post-processing -> Opened contour plots' +
' for examples of plot types.')
self.plot_msi_button = FXButton(impVF, 'Create plot')
#
# Tabs / Geometric Imperfections / Thickness imperfections
#
FXTabItem(impBook, 'Thickness imperfections', None, TAB_LEFT)
impVF = FXVerticalFrame(impBook, LAYOUT_FILL_Y|FRAME_RAISED|FRAME_SUNKEN)
impHF = FXHorizontalFrame(impVF, opts=LAYOUT_CENTER_Y)
impVF = FXVerticalFrame(impHF)
FXCheckButton(impVF, 'Use the "theta z thickness" format', form.imp_t_theta_z_formatKw)
FXLabel(impVF, '')
FXLabel(impVF, '')
self.imp_ti_db = AFXComboBox(impVF, 0, 15, 'Select from database:', form.imp_thickKw)
reload(conecylDB)
if form.imp_t_theta_z_formatKw.getValue():
imps = conecylDB.imps_theta_z
else:
imps = conecylDB.imps
keys = map(str, [k for k in imps.keys() if 'ti' in imps[k].keys()])
keys.sort()
self.imp_ti_db.appendItem('')
for k in keys:
self.imp_ti_db.appendItem(k)
FXCheckButton(impVF, 'Strech H_points to H_measured', form.imp_t_stretch_HKw)
impVA = AFXVerticalAligner(impVF)
AFXTextField(impVA, 8, 'Define number of properties to use (zero to use from measured data):', form.imp_num_setsKw, opts=AFXTEXTFIELD_INTEGER)
AFXTextField(impVA, 8, 'Number of closest points to use in the inverse weighted interpolation:', form.imp_t_ncpKw, opts=AFXTEXTFIELD_INTEGER)
AFXTextField(impVA, 8, 'Power parameter to use in the inverse weighted interpolation:\n'+\
'(when increased, increases the influence of the closest points)', form.imp_t_power_parameterKw, opts=AFXTEXTFIELD_FLOAT)
FXLabel(impHF, ' ')
impVF2 = FXVerticalFrame(impHF)
FXLabel(impVF2, 'scaling factor=0 will NOT\napply the imperfection', opts=LAYOUT_CENTER_X)
self.imp_t_sf = AFXTable(impVF2, 21, 2,(MAX_MODELS+1), 2,
form.imp_t_scalingsKw, 0,
opts=AFXTABLE_EDITABLE|AFXTABLE_TYPE_FLOAT|AFXTABLE_STYLE_DEFAULT)
self.imp_t_sf.setLeadingRows(1)
self.imp_t_sf.setLeadingColumns(1)
self.imp_t_sf.setLeadingRowLabels('scaling factor')
colLabel = '\t'.join(['model {0:02d}'.format(i) for i in range(1, MAX_MODELS+1)])
self.imp_t_sf.setLeadingColumnLabels(colLabel)
FXLabel(impVF, '')
FXLabel(impVF, '')
self.apply_imp_t = FXButton(impVF, 'Apply Thickness Imperfections')
FXLabel(impVF, '')
FXHorizontalSeparator(impVF)
FXLabel(impVF, '')
lbl = FXLabel(impVF, 'Visualization of thickness imperfection')
lbl.setFont(getAFXFont(FONT_BOLD))
AFXNote(impVF, 'The plot is made for an existing cone model,' +
' that may have been created\nusing parameters' +
' that differ from those shown in this window.')
plotVA = AFXVerticalAligner(impVF)
self.model_cbs.append(AFXComboBox(plotVA, 2, 10, 'Select model:',
form.plot_imp_modelKw))
plot_type = AFXComboBox(plotVA, 2, 10,
"Plot type:",
form.plot_imp_typeKw)
for i in range(1, 7):
plot_type.appendItem('Plot type {0}'.format(i))
plot_type.setCurrentItem(0)
AFXNote(impVF, 'See Post-processing -> Opened contour plots' +
' for examples of plot types.')
self.plot_ti_button = FXButton(impVF, 'Create plot')
#
# Tabs / Load Imperfection
#
FXTabItem(mainTabBook, 'Load Imperfection')
liFrame = FXHorizontalFrame(mainTabBook, FRAME_RAISED|FRAME_SUNKEN)
liBook = FXTabBook(liFrame, None, 0, TABBOOK_LEFTTABS|LAYOUT_FILL_X)
#
# Tabs / Load Imperfection / Load Asymmetry
#
FXTabItem(liBook, 'Load Asymmetry', None, TAB_LEFT)
liHF = FXHorizontalFrame(liBook,)# opts=LAYOUT_CENTER_X|LAYOUT_FILL_Y|FRAME_RAISED|FRAME_SUNKEN)
liVF = FXVerticalFrame(liHF, opts=LAYOUT_CENTER_X|FRAME_RAISED|FRAME_SUNKEN)
pngpath = os.path.join(DAHOME, 'gui', 'icons', 'la.png')
icon = afxCreatePNGIcon(pngpath)
self.la_fig = FXLabel(liHF, '', icon)
liVF1 = FXVerticalFrame(liVF, opts=LAYOUT_CENTER_X|FRAME_RAISED|FRAME_SUNKEN)
self.lasw = FXSwitcher(liVF)
FXRadioButton(liVF1, 'Do not apply load asymmetry', self.lasw, FXSwitcher.ID_OPEN_FIRST)
FXRadioButton(liVF1, 'Unique load asymmetry to all models', self.lasw, FXSwitcher.ID_OPEN_FIRST+1)
FXRadioButton(liVF1, 'Different load asymmetry for each model', self.lasw, FXSwitcher.ID_OPEN_FIRST+2)
FXLabel(self.lasw, 'No load asymmetry will be applied')
liFA = AFXVerticalAligner(self.lasw)
self.la_beta = AFXTextField(liFA, 8, 'beta (degrees):', form.betadegKw, opts=AFXTEXTFIELD_FLOAT)
self.la_omega = AFXTextField(liFA, 8, 'omega (degrees):', form.omegadegKw, opts=AFXTEXTFIELD_FLOAT)
self.lasw.setCurrent(self.form.laKw.getValue())
#
liFB = FXHorizontalFrame(self.lasw)
self.betadegs = AFXTable(liFB, 21, 2,(MAX_MODELS+1), 2,
form.betadegsKw, 0,
opts=AFXTABLE_EDITABLE|AFXTABLE_TYPE_FLOAT|AFXTABLE_STYLE_DEFAULT)
self.betadegs.setLeadingRows(1)
self.betadegs.setLeadingColumns(1)
self.betadegs.setLeadingRowLabels('beta (degrees)')
self.betadegs.setLeadingColumnLabels(colLabel)
self.omegadegs = AFXTable(liFB, 21, 2,(MAX_MODELS+1), 2,
form.omegadegsKw, 0,
opts=AFXTABLE_EDITABLE|AFXTABLE_TYPE_FLOAT|AFXTABLE_STYLE_DEFAULT)
self.omegadegs.setLeadingRows(1)
self.omegadegs.setLeadingColumns(1)
self.omegadegs.setLeadingRowLabels('omega (degrees)')
self.omegadegs.setLeadingColumnLabels(colLabel)
#
#
# Tabs / Run
#
FXTabItem(mainTabBook, 'Run')
execFrame = FXHorizontalFrame(mainTabBook, FRAME_RAISED|FRAME_SUNKEN)
execVF = FXVerticalFrame(execFrame, LAYOUT_FILL_X|LAYOUT_FILL_Y)
execHF = FXHorizontalFrame(execVF, opts=LAYOUT_FILL_X)
execVF2 = FXVerticalFrame(execHF, opts=LAYOUT_CENTER_Y)
self.std_to_run = AFXComboBox(execVF2, 0, 10, 'Select study to run:',
form.std_to_postKw)
FXLabel(execVF2, '')
AFXTextField(execVF2, 5,
'Number of cpus (some licenses do not allow this feature)',
form.ncpusKw, opts=AFXTEXTFIELD_INTEGER)
FXLabel(execVF2, '')
FXCheckButton(execVF2,
'Use job stopper (default: after the second drop or after 30%\n'+
'of reaction load drop it stops the analysis)',
form.use_job_stopperKw)
FXLabel(execVF2, '')
self.clean_output = FXButton(execVF2, 'Clean output folder')
FXLabel(execVF2, '')
self.exec_std = FXButton(execVF2, 'Run study')
self.exec_log = FXText(execHF, None, 0,
TEXT_READONLY|TEXT_SHOWACTIVE|LAYOUT_FIX_WIDTH|LAYOUT_FIX_HEIGHT|
LAYOUT_CENTER_X|LAYOUT_CENTER_Y, 0, 0, 500, 440)
self.exec_log.setBarColumns(3)
self.exec_log.setBarColor(FXRGB(190, 190, 190))
self.exec_log.setText('RUN LOG FILE')
#
# Tabs / Post-processing
#
FXTabItem(mainTabBook, 'Post-processing')
postFrame = FXVerticalFrame(mainTabBook, FRAME_RAISED|FRAME_SUNKEN)
self.std_to_post = AFXComboBox(postFrame, 0, 10, 'Select study:',
form.std_to_postKw, opts=LAYOUT_CENTER_X)
postBook = FXTabBook(postFrame, None, 0, TABBOOK_LEFTTABS|LAYOUT_FILL_X)
#
# Tabs / Post-processing / Load shortening curves
#
FXTabItem(postBook, 'Load shortening curves', None, TAB_LEFT)
postVF = FXVerticalFrame(postBook, FRAME_RAISED|FRAME_SUNKEN)
postVF2 = FXVerticalFrame(postVF, opts=LAYOUT_CENTER_X|LAYOUT_CENTER_Y)
self.post_ls_button = FXButton(postVF2, 'Plot load shortening curves')
FXCheckButton(postVF2, 'Put plots in Excel', form.post_put_in_ExcelKw)
FXCheckButton(postVF2, 'Open Excel', form.post_open_ExcelKw)
#
# Tabs / Post-processing / Knock-down curve
#
FXTabItem(postBook, 'Knock-down curve', None, TAB_LEFT)
postVF = FXVerticalFrame(postBook, FRAME_RAISED|FRAME_SUNKEN)
postVF2 = FXVerticalFrame(postVF, opts=LAYOUT_CENTER_X|LAYOUT_CENTER_Y)
FXCheckButton(postVF2, 'Put plots in Excel', form.post_put_in_ExcelKw)
FXCheckButton(postVF2, 'Open Excel', form.post_open_ExcelKw)
postVF2 = FXVerticalFrame(postVF, opts=LAYOUT_CENTER_X|LAYOUT_CENTER_Y)
self.post_kdf_button = FXButton(postVF2, 'Plot knock-down curves')
#
# Tabs / Post-processing / Stress analysis
#
FXTabItem(postBook, 'Stress analysis', None, TAB_LEFT)
postVF = FXVerticalFrame(postBook, FRAME_RAISED|FRAME_SUNKEN)
postVF2 = FXVerticalFrame(postVF, opts=LAYOUT_CENTER_X|LAYOUT_CENTER_Y)
self.model_cbs.append(AFXComboBox(postVF2, 0, 10, 'Select model:',
form.model_to_postKw))
FXLabel(postVF2, 'Stress analysis using the Hashin and Tsai-Wu criteria (implemented for composite/monolitic only)')
FXLabel(postVF2, 'This macro performs an envolope among all elements, ' +\
'among all the plies, considering for each ply: the ' +\
'bottom, the middle and the top')
postVF2 = FXVerticalFrame(postVF, opts=LAYOUT_CENTER_X|LAYOUT_CENTER_Y)
self.post_stress_button = FXButton(postVF2, 'Start stress analysis')
#
# Tabs / Post-processing / Utils
#
FXTabItem(postBook, 'Opened Contour Plots', None, TAB_LEFT)
postVF = FXVerticalFrame(postBook, FRAME_RAISED|FRAME_SUNKEN)
FXLabel(postVF, 'Plot current field output as an opened cone/cylinder.'
+ ' NOTE: For cylinders it will always be Plot type 5')
FXLabel(postVF, '')
postHF = FXHorizontalFrame(postVF)
postVF1 = FXVerticalFrame(postHF, opts=LAYOUT_LEFT|LAYOUT_CENTER_Y)
postVF2 = FXVerticalFrame(postHF, opts=LAYOUT_LEFT|LAYOUT_CENTER_Y)
postVF3 = FXVerticalFrame(postHF, opts=LAYOUT_LEFT|LAYOUT_CENTER_Y)
self.plot_type_buttons = []
button = FXButton(postVF1, 'Plot type 1')
pngpath = os.path.join(DAHOME, 'gui', 'icons', 'plot_type_1.png')
icon = afxCreatePNGIcon(pngpath)
FXLabel(postVF1, '', icon, opts=ICON_AFTER_TEXT)
self.plot_type_buttons.append(button)
button = FXButton(postVF1, 'Plot type 2')
pngpath = os.path.join(DAHOME, 'gui', 'icons', 'plot_type_2.png')
icon = afxCreatePNGIcon(pngpath)
FXLabel(postVF1, '', icon, opts=ICON_AFTER_TEXT)
self.plot_type_buttons.append(button)
button = FXButton(postVF2, 'Plot type 3')
pngpath = os.path.join(DAHOME, 'gui', 'icons', 'plot_type_3.png')
icon = afxCreatePNGIcon(pngpath)
FXLabel(postVF2, '', icon, opts=ICON_AFTER_TEXT)
self.plot_type_buttons.append(button)
button = FXButton(postVF1, 'Plot type 4')
pngpath = os.path.join(DAHOME, 'gui', 'icons', 'plot_type_4.png')
icon = afxCreatePNGIcon(pngpath)
FXLabel(postVF1, '', icon, opts=ICON_AFTER_TEXT)
self.plot_type_buttons.append(button)
button = FXButton(postVF2, 'Plot type 5')
pngpath = os.path.join(DAHOME, 'gui', 'icons', 'plot_type_5.png')
icon = afxCreatePNGIcon(pngpath)
FXLabel(postVF2, '', icon, opts=ICON_AFTER_TEXT)
self.plot_type_buttons.append(button)
button = FXButton(postVF3, 'Plot type 6')
pngpath = os.path.join(DAHOME, 'gui', 'icons', 'plot_type_6.png')
icon = afxCreatePNGIcon(pngpath)
FXLabel(postVF3, '', icon, opts=ICON_AFTER_TEXT)
self.plot_type_buttons.append(button)
#
# Tabs / About this plug-in
#
FXTabItem(mainTabBook, 'About this plug-in')
aboutVF = FXVerticalFrame(mainTabBook, FRAME_RAISED|FRAME_SUNKEN)
pngpath = os.path.join(DAHOME, 'gui', 'icons', 'pfh.png')
icon = afxCreatePNGIcon(pngpath)
FXLabel(aboutVF, 'DESICOS package Version {0}'.format(version))
FXLabel(aboutVF, '')
FXLabel(aboutVF, 'Released by partner:', icon, opts=ICON_AFTER_TEXT)
pngpath = os.path.join(DAHOME, 'gui', 'icons', 'desicos2.png')
icon = afxCreatePNGIcon(pngpath)
FXLabel(aboutVF, '', icon)
tmp = FXText(aboutVF, None, 0, TEXT_READONLY|LAYOUT_FIX_WIDTH|LAYOUT_FIX_HEIGHT|\
LAYOUT_CENTER_Y, 0, 0, 700, 150)
tmp.setText(\
'OBS:\n\n'
'- Have fun!\n\n')
FXLabel(aboutVF, 'Contact: <EMAIL>')
#
self.extraUpdates()
def update_database(self, update_all=False):
    """Synchronize the GUI widgets with the conecyl database.

    Parameters
    ----------
    update_all : bool, optional
        When ``True``, re-fetch the ``ccs``, ``laminaprops`` and
        ``allowables`` dictionaries and rebuild the corresponding combo
        boxes; otherwise only the dependent widget states (text fields,
        save/delete button enabling) are refreshed.
    """
    form = self.form

    def _set_button_states(key, name_field, save_button, del_button,
                           locked_keys):
        # 'Enter New'      -> let the user type a name and save it;
        # a sentinel key   -> lock everything (e.g. 'deleted!');
        # an existing name -> only deletion is allowed.
        if key == 'Enter New':
            name_field.enable()
            save_button.enable()
            del_button.disable()
        elif key in locked_keys:
            name_field.disable()
            save_button.disable()
            del_button.disable()
        else:
            name_field.disable()
            save_button.disable()
            del_button.enable()

    if update_all:
        # NOTE: combo-box keys come from the local database only, while
        # the stored self.ccs dict is the full fetch (as in the original
        # flow) -- TODO confirm this asymmetry is intentional
        ccs = fetch('ccs', local_only=True)
        laminaprops = fetch('laminaprops')
        allowables = fetch('allowables')
        self.ccs = fetch('ccs')
        self.laminaprops = laminaprops
        self.allowables = allowables
        keys_ccs = sorted(map(str, ccs.keys()))
        keys_laminaprops = sorted(map(str, laminaprops.keys()))
        keys_allowables = sorted(map(str, allowables.keys()))
        # ccs combo box
        keys = keys_ccs + sorted(conecylDB.include_in_GUI)
        self.ccs_keys = keys
        self.ccs_CB.clearItems()
        self.ccs_CB.appendItem('Enter New')
        for k in keys:
            self.ccs_CB.appendItem(k)
        # laminaprops combo box plus the laminate table pick-list
        keys = keys_laminaprops
        self.laminaprops_keys = keys
        self.stackTableListId = self.laminateTable.addList(
            ' \t' + '\t'.join(keys))
        self.laminateTable.setColumnListId(1, self.stackTableListId)
        self.laminaprops_CB.clearItems()
        self.laminaprops_CB.appendItem('Enter New')
        for k in keys:
            self.laminaprops_CB.appendItem(k)
        # allowables combo box
        keys = keys_allowables
        self.allowables_keys = keys
        self.allowables_CB.clearItems()
        self.allowables_CB.appendItem('Enter New')
        for k in keys:
            self.allowables_CB.appendItem(k)
    # ccs: load the selected cone/cylinder into the form, unless it is
    # the entry that is already loaded
    k = form.ccKeyKw.getValue()
    if k in self.ccs_keys and k != form.last_loadedKw.getValue():
        cc = self.ccs[k]
        cc_dict2form(ccname=k, cc=cc, db=self, form=form)
    _set_button_states(k, self.new_cc_name, self.save_cc_button,
                       self.del_cc_button,
                       ('deleted!', 'conecyl loaded!'))
    # laminaprops: mirror the selected property tuple into the text field
    k = form.laminapropKeyKw.getValue()
    if k in self.laminaprops_keys:
        v = self.laminaprops[k]
        form.laminapropKw.setValues(','.join([str(i) for i in v]))
    _set_button_states(k, self.new_laminaprop_name,
                       self.save_laminaprop_button,
                       self.del_laminaprop_button, ('deleted!',))
    # allowables: mirror the selected allowables tuple into the text field
    k = form.allowablesKeyKw.getValue()
    if k in self.allowables_keys:
        v = self.allowables[k]
        form.allowablesKw.setValues(','.join([str(i) for i in v]))
    _set_button_states(k, self.new_allowables_name,
                       self.save_allowables_button,
                       self.del_allowables_button, ('deleted!',))
def save_cc(self):
    """Store the current form contents as a cone/cylinder database entry."""
    form = self.form
    cc_name = form.new_cc_nameKw.getValue()
    cc_dict = cc_form2dict(self, form)
    # record which entry is now loaded, so update_database will not
    # immediately reload it over the user's edits
    form.last_loadedKw.setValue(cc_name)
    message(conecylDB.save('ccs', cc_name, cc_dict))
def del_cc(self):
    """Delete the currently selected cone/cylinder entry from the database."""
    cc_name = self.form.ccKeyKw.getValue()
    message(conecylDB.delete('ccs', cc_name))
def save_laminaprop(self):
    """Store the lamina properties typed in the GUI as a database entry."""
    form = self.form
    prop_name = form.new_laminaprop_nameKw.getValue()
    raw = form.laminapropKw.getValues()
    # skip empty tokens produced by trailing or duplicated commas
    values = tuple(float(token) for token in raw.split(',') if token != '')
    if len(values) == 2:
        # a 2-tuple is expanded by duplicating the first value:
        # (a, b) -> (a, a, b)
        first, second = values
        values = (first, first, second)
    message(conecylDB.save('laminaprops', prop_name, values))
def del_laminaprop(self):
    """Remove the selected lamina property entry from the database."""
    prop_name = self.form.laminapropKeyKw.getValue()
    message(conecylDB.delete('laminaprops', prop_name))
def save_allowables(self):
    """Store the allowables typed in the GUI as a database entry.

    Empty tokens (e.g. from a trailing comma in the text field) are
    skipped, consistent with ``save_laminaprop``; previously they made
    ``float('')`` raise ``ValueError``.
    """
    name = self.form.new_allowables_nameKw.getValue()
    raw = self.form.allowablesKw.getValues()
    value = tuple(float(i) for i in raw.split(',') if i != '')
    message(conecylDB.save('allowables', name, value))
def del_allowables(self):
    """Remove the selected allowables entry from the database."""
    entry_name = self.form.allowablesKeyKw.getValue()
    message(conecylDB.delete('allowables', entry_name))
def extraUpdates(self):
    """Refresh the study and model combo boxes and then the database widgets.

    Study names are derived from the open mdb models (minus their two
    trailing '_'-separated tokens) and from ``*.study`` files found in
    ``TMP_DIR``; model combo boxes get every open model except the
    auxiliary linear-buckling ('_lb') ones.
    """
    # model names, excluding the '_lb' (linear buckling) models
    # (the original filtered twice with two spellings: k[-3:] != '_lb'
    # and endswith; a single endswith filter is equivalent)
    keys = [k for k in mdb.models.keys() if not k.endswith('_lb')]
    # study name = model name minus the last two '_'-separated tokens
    std_names = set('_'.join(k.split('_')[:-2]) for k in keys)
    # also pick up studies saved to disk that have no open model
    for name in os.listdir(TMP_DIR):
        if '.study' in name:
            std_names.add(name.split('.')[0])
    std_names = sorted(std_names)
    #
    self.std_to_load.clearItems()
    self.std_to_post.clearItems()
    self.std_to_run.clearItems()
    for std_name in std_names:
        self.std_to_post.appendItem(std_name)
        self.std_to_load.appendItem(std_name)
        self.std_to_run.appendItem(std_name)
    for cb in self.model_cbs:
        cb.clearItems()
        for k in keys:
            cb.appendItem(k)
    # keep the model selections valid after models were added/removed
    if keys:
        if self.form.model_to_postKw.getValue() not in keys:
            self.form.model_to_postKw.setValue(keys[0])
        if self.form.plot_imp_modelKw.getValue() not in keys:
            self.form.plot_imp_modelKw.setValue(keys[0])
    self.update_database(update_all=True)
def slowUpdates(self):
    """Reload the run-log text of the selected study into the log widget.

    Resets ``self.logcount`` so that ``processUpdates`` (which increments
    it and calls this method only when it exceeds 20) does not re-read
    the file on every cycle.
    """
    form = self.form
    std_name = form.std_to_postKw.getValue()
    self.logcount = 0
    log_path = os.path.join(TMP_DIR, std_name, 'run_log.txt')
    if os.path.isfile(log_path):
        # read in one shot instead of quadratic line-by-line string
        # concatenation; the with-block guarantees the file is closed
        with open(log_path, 'r') as log_file:
            text = log_file.read()
        self.exec_log.setText(text)
        self.exec_log.setCursorRow(100)
def saveStudy(self):
    """Persist the current study through the Abaqus kernel, then refresh."""
    message('Saving...')
    form = self.form
    form.laKw.setValue(self.lasw.getCurrent())
    # push logcount past the threshold so the next processUpdates cycle
    # runs slowUpdates immediately
    self.logcount = 10000
    std_name = str(form.std_nameKw.getValue())
    params = str(form.get_params_from_gui())
    command = '\n'.join([
        'import gui_commands',
        'reload(gui_commands)',
        'gui_commands.save_study("{0}", {1})'.format(std_name, params),
        ''])
    sendCommand(command)
    self.extraUpdates()
def processUpdates(self):
form = self.form
std_name = form.std_nameKw.getValue()
form.std_nameKw.setValue(rsc(std_name))
# imp_tables[k]
for k in ['pl', 'cbi', 'd', 'ax', 'lbmi', 'cut']:
correct_num = self.imp_spinners[k].getValue()
current_num = self.imp_current_num[k]
if current_num > correct_num:
self.imp_current_num[k] = correct_num
for col in range(correct_num+1, current_num+1):
self.imp_tables[k].setColumnEditable(col, False)
self.imp_tables[k].shadeReadOnlyItems(True)
elif current_num < correct_num:
self.imp_current_num[k] = correct_num
for col in range(current_num+1, correct_num+1):
self.imp_tables[k].setColumnEditable(col, True)
num_param = self.imp_num_params[k]
self.imp_tables[k].setItemEditable(num_param+1, col, False)
self.imp_tables[k].shadeReadOnlyItems(True)
#TODO FIXME there is an update bug in the tables
# when the perturbation loads are deleted for example
# sometimes they are not really deleted, specially when the user
# does it faster
# ppiTable
old_num_plies = self.current_num_plies
new_num_plies = NUM_PLIES - self.laminateTable.getNumEmptyRowsAtBottom()
if old_num_plies != new_num_plies:
self.current_num_plies = new_num_plies
self.plot_ply_index.setRange(1, max(new_num_plies, 1))
if new_num_plies < old_num_plies:
for row in range(new_num_plies+1, old_num_plies+1):
for col in range(1, 5):
self.ppiTable.setItemEditable(row, col, False)
else:
for row in range(old_num_plies+1, new_num_plies+1):
for col in range(1, 5):
self.ppiTable.setItemEditable(row, col, True)
for row in range(1, max(old_num_plies, new_num_plies)+1):
val = self.laminateTable.getItemValue(row, 3)
self.ppiTable.setItemValue(row, 5, val)
if form.ppi_enabledKw.getValue():
self.ppiTable.enable()
else:
self.ppiTable.disable()
#
self.logcount += 1
if form.just_created_study:
form.loaded_study = True
form.just_created_study = False
self.extraUpdates()
if self.logcount > 20:
self.slowUpdates()
# cc, laminapropKeys, plyts, stack, laminaprop and allowables updates
self.update_database()
if self.save_cc_button.getState() == STATE_DOWN:
self.save_cc_button.setState(STATE_UP)
tmp = form.new_cc_nameKw.getValue()
form.new_cc_nameKw.setValue(rsc(tmp))
self.save_cc()
self.update_database(update_all=True)
if self.del_cc_button.getState() == STATE_DOWN:
self.del_cc_button.setState(STATE_UP)
self.del_cc()
form.ccKeyKw.setValue('deleted!')
self.update_database(update_all=True)
if self.save_laminaprop_button.getState() == STATE_DOWN:
self.save_laminaprop_button.setState(STATE_UP)
tmp = form.new_laminaprop_nameKw.getValue()
form.new_laminaprop_nameKw.setValue(rsc(tmp))
self.save_laminaprop()
self.update_database(update_all=True)
if self.del_laminaprop_button.getState() == STATE_DOWN:
self.del_laminaprop_button.setState(STATE_UP)
self.del_laminaprop()
form.laminapropKeyKw.setValue('deleted!')
self.update_database(update_all=True)
if self.save_allowables_button.getState() == STATE_DOWN:
self.save_allowables_button.setState(STATE_UP)
tmp = form.new_allowables_nameKw.getValue()
form.new_allowables_nameKw.setValue(rsc(tmp))
self.save_allowables()
self.update_database(update_all=True)
if self.del_allowables_button.getState() == STATE_DOWN:
self.del_allowables_button.setState(STATE_UP)
self.del_allowables()
form.allowablesKeyKw.setValue('deleted!')
self.update_database(update_all=True)
# apply Mid-Surface Imperfections
if self.apply_imp_ms.getState() == STATE_DOWN:
self.apply_imp_ms.setState(STATE_UP)
std_name = form.std_nameKw.getValue()
if not form.imp_msKw.getValue():
message('An imperfection must be selected!')
elif not form.loaded_study:
message('The study must be created or loaded first!')
else:
form.imp_ms_std_nameKw.setValue(std_name)
command = 'import gui_commands\n' +\
'reload(gui_commands)\n'
command += form.apply_imp_ms.getCommandString()
sendCommand(command, writeToReplay=False, writeToJournal=True)
# apply Thickness Imperfections
if self.apply_imp_t.getState() == STATE_DOWN:
self.apply_imp_t.setState(STATE_UP)
std_name = form.std_nameKw.getValue()
if form.imp_thickKw.getValue() == '':
message('An imperfection must be selected!')
elif not form.loaded_study:
message('The study must be created or loaded first!')
else:
form.imp_t_std_nameKw.setValue(std_name)
command = 'import gui_commands\n' +\
'reload(gui_commands)\n'
command += form.apply_imp_t.getCommandString()
sendCommand(command, writeToReplay=False, writeToJournal=True)
# save study
if self.save_std.getState() == STATE_DOWN:
self.save_std.setState(STATE_UP)
self.saveStudy()
# load study
if self.load_std.getState() == STATE_DOWN:
self.load_std.setState(STATE_UP)
message('Loading...')
self.logcount = 10000
std_name = form.std_to_postKw.getValue()
command = ('import gui_commands\n' +
'reload(gui_commands)\n' +
'gui_commands.load_study("{0}")\n'.format(std_name))
sendCommand(command)
reload(gui_commands)
if not gui_commands.load_study_gui(std_name, form):
message('Warning: The loaded study was not saved from the GUI. Layup and imperfection data may be missing.')
if std_name:
outpath = os.path.join(TMP_DIR, std_name)
else:
outpath = TMP_DIR
message('The DESICOS study "{0}.study" has been opened.'.format(
outpath))
message(' ')
form.loaded_study = True
outputs = os.path.join(outpath, 'outputs')
if not os.path.isdir(outputs):
os.makedirs(outputs)
os.chdir(outpath)
return
# changing variable widgets
if form.displ_controlledKw.getValue():
self.axial_displ.enable()
self.axial_load.disable()
self.axial_step.disable()
else:
self.axial_displ.disable()
self.axial_load.enable()
if form.separate_load_stepsKw.getValue():
self.axial_step.enable()
if form.separate_load_stepsKw.getValue():
self.art_damp1.enable()
self.damp_factor1.enable()
self.minInc1.enable()
self.initialInc1.enable()
self.maxInc1.enable()
self.maxNumInc1.enable()
self.pload_step.enable()
self.pressure_step.enable()
if not form.displ_controlledKw.getValue():
self.axial_step.enable()
else:
self.art_damp1.disable()
self.damp_factor1.disable()
self.minInc1.disable()
self.initialInc1.disable()
self.maxInc1.disable()
self.maxNumInc1.disable()
self.pload_step.disable()
self.pressure_step.disable()
self.axial_step.disable()
# Apply DLR boundary conditions
DLR_BC = {
'resin_add_BIR' : True,
'resin_add_BOR' : True,
'resin_add_TIR' : True,
'resin_add_TOR' : True,
'bc_fix_bottom_side_uR' : True,
'bc_fix_bottom_side_v' : False,
'bc_fix_bottom_side_u3' : False,
'bc_fix_top_side_uR' : True,
'bc_fix_top_side_v' : False,
'bc_fix_top_side_u3' : False}
if form.use_DLR_bcKw.getValue():
for key, value in DLR_BC.iteritems():
getattr(self, key).disable()
getattr(form, key+'Kw').setValue(value)
else:
for key in DLR_BC:
getattr(self, key).enable()
# plot opened conecyl
for i, plot_type_button in enumerate(self.plot_type_buttons):
if plot_type_button.getState() == STATE_DOWN:
plot_type_button.setState(STATE_UP)
reload(gui_plot)
gui_plot.plot_opened_conecyl(plot_type=(i+1))
if not form.loaded_study:
return
else:
if not form.post_outpathKw.getValue():
std_name = form.std_to_postKw.getValue()
if std_name:
outpath = os.path.join(TMP_DIR, std_name)
else:
outpath = TMP_DIR
form.post_outpathKw.setValue(outpath)
# post load shortening curve button
if self.post_ls_button.getState() == STATE_DOWN:
self.post_ls_button.setState(STATE_UP)
reload(gui_plot)
put_in_Excel = form.post_put_in_ExcelKw.getValue()
open_Excel = form.post_open_ExcelKw.getValue()
std_name = form.std_to_postKw.getValue()
gui_plot.plot_ls_curve(std_name, put_in_Excel, open_Excel)
# post knock-down curves
if self.post_kdf_button.getState() == STATE_DOWN:
self.post_kdf_button.setState(STATE_UP)
reload(gui_plot)
put_in_Excel = form.post_put_in_ExcelKw.getValue()
open_Excel = form.post_open_ExcelKw.getValue()
std_name = form.std_to_postKw.getValue()
gui_plot.plot_kdf_curve(std_name,
put_in_Excel, open_Excel,
configure_session=False)
# post stress analysis button
if self.post_stress_button.getState() == STATE_DOWN:
self.post_stress_button.setState(STATE_UP)
reload(gui_plot)
cc_name = form.model_to_postKw.getValue()
std_name = form.std_to_postKw.getValue()
gui_plot.plot_stress_analysis(std_name, cc_name)
# plot PPI button
if self.plot_ppi_button.getState() == STATE_DOWN:
self.plot_ppi_button.setState(STATE_UP)
reload(gui_plot)
cc_name = form.plot_imp_modelKw.getValue()
# ply_index is 1-based in GUI, 0-based in code
ply_index = form.plot_ply_indexKw.getValue() - 1
plot_type = int(form.plot_imp_typeKw.getValue()[-1])
std_name = form.std_to_postKw.getValue()
gui_plot.plot_ppi(std_name, cc_name, ply_index, plot_type)
# plot MSI button
if self.plot_msi_button.getState() == STATE_DOWN:
self.plot_msi_button.setState(STATE_UP)
reload(gui_plot)
cc_name = form.plot_imp_modelKw.getValue()
plot_type = int(form.plot_imp_typeKw.getValue()[-1])
std_name = form.std_to_postKw.getValue()
gui_plot.plot_msi(std_name, cc_name, plot_type)
# plot TI button
if self.plot_ti_button.getState() == STATE_DOWN:
self.plot_ti_button.setState(STATE_UP)
reload(gui_plot)
cc_name = form.plot_imp_modelKw.getValue()
plot_type = int(form.plot_imp_typeKw.getValue()[-1])
std_name = form.std_to_postKw.getValue()
gui_plot.plot_ti(std_name, cc_name, plot_type)
# run models
if self.exec_std.getState() == STATE_DOWN:
self.exec_std.setState(STATE_UP)
self.logcount = 10000
ncpus = form.ncpusKw.getValue()
std_name = form.std_to_postKw.getValue()
command = ('import __main__\n' +
'__main__.stds["{0}"].write_inputs()\n'.format(std_name))
sendCommand(command)
reload(gui_commands)
gui_commands.run_study(std_name, ncpus,
form.use_job_stopperKw.getValue())
# clear output folder
if self.clean_output.getState() == STATE_DOWN:
self.exec_std.setState(STATE_UP)
self.logcount = 10000
showAFXWarningDialog(self, 'Confirm Action?\n' +
'All output files will be deleted!',
AFXDialog.YES | AFXDialog.NO,
self.form, self.form.ID_DEL_OUT_FOLDER)
#if form.laKw.getValue() == False:
# self.la_beta.disable()
# self.la_omega.disable()
#else:
# self.la_beta.enable()
# self.la_omega.enable()
# default profile
# webBrowser url
#TODO add click-able link to pfh and desicos
if False:
#FXMAPFUNC(...
status = webBrowser.openWithURL('www.pfh.de')
status = webBrowser.openWithURL('www.desicos.eu')
return
    def show(self):
        """Resize the dialog to its default dimensions and display it."""
        # Note: This method is only necessary because the prototype
        # application allows changes to be made in the dialog code and
        # reloaded while the application is still running. Normally you
        # would not need to have a show() method in your dialog.
        # Resize the dialog to its default dimensions to account for
        # any widget changes that may have been made.
        #
        self.resize(self.getDefaultWidth(), self.getDefaultHeight())
        AFXDataDialog.show(self)
| [
"numpy.empty",
"gui_plot.plot_stress_analysis",
"os.path.isfile",
"gui_plot.plot_ti",
"gui_plot.plot_msi",
"os.path.join",
"os.chdir",
"gui_plot.plot_ls_curve",
"gui_commands.load_study_gui",
"gui_plot.plot_kdf_curve",
"desicos.conecylDB.fetch",
"gui_plot.plot_opened_conecyl",
"desicos.conec... | [((1500, 1538), 'numpy.empty', 'np.empty', (['(NUM_PLIES, 3)'], {'dtype': '"""|S50"""'}), "((NUM_PLIES, 3), dtype='|S50')\n", (1508, 1538), True, 'import numpy as np\n'), ((483, 504), 'ast.literal_eval', 'ast.literal_eval', (['tmp'], {}), '(tmp)\n', (499, 504), False, 'import ast\n'), ((5910, 5962), 'os.path.join', 'os.path.join', (['DAHOME', '"""gui"""', '"""icons"""', '"""geometry.png"""'], {}), "(DAHOME, 'gui', 'icons', 'geometry.png')\n", (5922, 5962), False, 'import os\n'), ((16984, 17039), 'os.path.join', 'os.path.join', (['DAHOME', '"""gui"""', '"""icons"""', '"""resin_rings.png"""'], {}), "(DAHOME, 'gui', 'icons', 'resin_rings.png')\n", (16996, 17039), False, 'import os\n'), ((19635, 19685), 'os.path.join', 'os.path.join', (['DAHOME', '"""gui"""', '"""icons"""', '"""axial2.png"""'], {}), "(DAHOME, 'gui', 'icons', 'axial2.png')\n", (19647, 19685), False, 'import os\n'), ((27976, 28032), 'os.path.join', 'os.path.join', (['DAHOME', '"""gui"""', '"""icons"""', '"""extra_height.png"""'], {}), "(DAHOME, 'gui', 'icons', 'extra_height.png')\n", (27988, 28032), False, 'import os\n'), ((29613, 29667), 'os.path.join', 'os.path.join', (['DAHOME', '"""gui"""', '"""icons"""', '"""ply_pieces.png"""'], {}), "(DAHOME, 'gui', 'icons', 'ply_pieces.png')\n", (29625, 29667), False, 'import os\n'), ((32948, 33006), 'os.path.join', 'os.path.join', (['DAHOME', '"""gui"""', '"""icons"""', '"""fiber_fraction.png"""'], {}), "(DAHOME, 'gui', 'icons', 'fiber_fraction.png')\n", (32960, 33006), False, 'import os\n'), ((40588, 40634), 'os.path.join', 'os.path.join', (['DAHOME', '"""gui"""', '"""icons"""', '"""la.png"""'], {}), "(DAHOME, 'gui', 'icons', 'la.png')\n", (40600, 40634), False, 'import os\n'), ((46970, 47025), 'os.path.join', 'os.path.join', (['DAHOME', '"""gui"""', '"""icons"""', '"""plot_type_1.png"""'], {}), "(DAHOME, 'gui', 'icons', 'plot_type_1.png')\n", (46982, 47025), False, 'import os\n'), ((47239, 47294), 'os.path.join', 'os.path.join', (['DAHOME', 
'"""gui"""', '"""icons"""', '"""plot_type_2.png"""'], {}), "(DAHOME, 'gui', 'icons', 'plot_type_2.png')\n", (47251, 47294), False, 'import os\n'), ((47508, 47563), 'os.path.join', 'os.path.join', (['DAHOME', '"""gui"""', '"""icons"""', '"""plot_type_3.png"""'], {}), "(DAHOME, 'gui', 'icons', 'plot_type_3.png')\n", (47520, 47563), False, 'import os\n'), ((47777, 47832), 'os.path.join', 'os.path.join', (['DAHOME', '"""gui"""', '"""icons"""', '"""plot_type_4.png"""'], {}), "(DAHOME, 'gui', 'icons', 'plot_type_4.png')\n", (47789, 47832), False, 'import os\n'), ((48046, 48101), 'os.path.join', 'os.path.join', (['DAHOME', '"""gui"""', '"""icons"""', '"""plot_type_5.png"""'], {}), "(DAHOME, 'gui', 'icons', 'plot_type_5.png')\n", (48058, 48101), False, 'import os\n'), ((48315, 48370), 'os.path.join', 'os.path.join', (['DAHOME', '"""gui"""', '"""icons"""', '"""plot_type_6.png"""'], {}), "(DAHOME, 'gui', 'icons', 'plot_type_6.png')\n", (48327, 48370), False, 'import os\n'), ((48717, 48764), 'os.path.join', 'os.path.join', (['DAHOME', '"""gui"""', '"""icons"""', '"""pfh.png"""'], {}), "(DAHOME, 'gui', 'icons', 'pfh.png')\n", (48729, 48764), False, 'import os\n'), ((49002, 49054), 'os.path.join', 'os.path.join', (['DAHOME', '"""gui"""', '"""icons"""', '"""desicos2.png"""'], {}), "(DAHOME, 'gui', 'icons', 'desicos2.png')\n", (49014, 49054), False, 'import os\n'), ((54688, 54707), 'os.listdir', 'os.listdir', (['TMP_DIR'], {}), '(TMP_DIR)\n', (54698, 54707), False, 'import os\n'), ((55874, 55920), 'os.path.join', 'os.path.join', (['TMP_DIR', 'std_name', '"""run_log.txt"""'], {}), "(TMP_DIR, std_name, 'run_log.txt')\n", (55886, 55920), False, 'import os\n'), ((55932, 55956), 'os.path.isfile', 'os.path.isfile', (['log_path'], {}), '(log_path)\n', (55946, 55956), False, 'import os\n'), ((27274, 27319), 'os.path.join', 'os.path.join', (['DAHOME', '"""gui"""', '"""icons"""', 'pngs[k]'], {}), "(DAHOME, 'gui', 'icons', pngs[k])\n", (27286, 27319), False, 'import os\n'), ((49554, 49583), 
'desicos.conecylDB.fetch', 'fetch', (['"""ccs"""'], {'local_only': '(True)'}), "('ccs', local_only=True)\n", (49559, 49583), False, 'from desicos.conecylDB import fetch\n'), ((49610, 49630), 'desicos.conecylDB.fetch', 'fetch', (['"""laminaprops"""'], {}), "('laminaprops')\n", (49615, 49630), False, 'from desicos.conecylDB import fetch\n'), ((49656, 49675), 'desicos.conecylDB.fetch', 'fetch', (['"""allowables"""'], {}), "('allowables')\n", (49661, 49675), False, 'from desicos.conecylDB import fetch\n'), ((49699, 49711), 'desicos.conecylDB.fetch', 'fetch', (['"""ccs"""'], {}), "('ccs')\n", (49704, 49711), False, 'from desicos.conecylDB import fetch\n'), ((53352, 53386), 'desicos.conecylDB.save', 'conecylDB.save', (['"""ccs"""', 'name', 'value'], {}), "('ccs', name, value)\n", (53366, 53386), True, 'import desicos.conecylDB as conecylDB\n'), ((53472, 53501), 'desicos.conecylDB.delete', 'conecylDB.delete', (['"""ccs"""', 'name'], {}), "('ccs', name)\n", (53488, 53501), True, 'import desicos.conecylDB as conecylDB\n'), ((53809, 53851), 'desicos.conecylDB.save', 'conecylDB.save', (['"""laminaprops"""', 'name', 'value'], {}), "('laminaprops', name, value)\n", (53823, 53851), True, 'import desicos.conecylDB as conecylDB\n'), ((53953, 53990), 'desicos.conecylDB.delete', 'conecylDB.delete', (['"""laminaprops"""', 'name'], {}), "('laminaprops', name)\n", (53969, 53990), True, 'import desicos.conecylDB as conecylDB\n'), ((54208, 54249), 'desicos.conecylDB.save', 'conecylDB.save', (['"""allowables"""', 'name', 'value'], {}), "('allowables', name, value)\n", (54222, 54249), True, 'import desicos.conecylDB as conecylDB\n'), ((54351, 54387), 'desicos.conecylDB.delete', 'conecylDB.delete', (['"""allowables"""', 'name'], {}), "('allowables', name)\n", (54367, 54387), True, 'import desicos.conecylDB as conecylDB\n'), ((56827, 56840), 'desicos.abaqus.utils.remove_special_characters', 'rsc', (['std_name'], {}), '(std_name)\n', (56830, 56840), True, 'from desicos.abaqus.utils import 
remove_special_characters as rsc\n'), ((63681, 63713), 'os.path.join', 'os.path.join', (['outpath', '"""outputs"""'], {}), "(outpath, 'outputs')\n", (63693, 63713), False, 'import os\n'), ((63806, 63823), 'os.chdir', 'os.chdir', (['outpath'], {}), '(outpath)\n', (63814, 63823), False, 'import os\n'), ((66821, 66879), 'gui_plot.plot_ls_curve', 'gui_plot.plot_ls_curve', (['std_name', 'put_in_Excel', 'open_Excel'], {}), '(std_name, put_in_Excel, open_Excel)\n', (66843, 66879), False, 'import gui_plot\n'), ((67240, 67328), 'gui_plot.plot_kdf_curve', 'gui_plot.plot_kdf_curve', (['std_name', 'put_in_Excel', 'open_Excel'], {'configure_session': '(False)'}), '(std_name, put_in_Excel, open_Excel,\n configure_session=False)\n', (67263, 67328), False, 'import gui_plot\n'), ((67700, 67748), 'gui_plot.plot_stress_analysis', 'gui_plot.plot_stress_analysis', (['std_name', 'cc_name'], {}), '(std_name, cc_name)\n', (67729, 67748), False, 'import gui_plot\n'), ((68220, 68278), 'gui_plot.plot_ppi', 'gui_plot.plot_ppi', (['std_name', 'cc_name', 'ply_index', 'plot_type'], {}), '(std_name, cc_name, ply_index, plot_type)\n', (68237, 68278), False, 'import gui_plot\n'), ((68630, 68677), 'gui_plot.plot_msi', 'gui_plot.plot_msi', (['std_name', 'cc_name', 'plot_type'], {}), '(std_name, cc_name, plot_type)\n', (68647, 68677), False, 'import gui_plot\n'), ((69026, 69072), 'gui_plot.plot_ti', 'gui_plot.plot_ti', (['std_name', 'cc_name', 'plot_type'], {}), '(std_name, cc_name, plot_type)\n', (69042, 69072), False, 'import gui_plot\n'), ((70399, 70435), 'uti.webBrowser.openWithURL', 'webBrowser.openWithURL', (['"""www.pfh.de"""'], {}), "('www.pfh.de')\n", (70421, 70435), False, 'from uti import webBrowser\n'), ((70457, 70497), 'uti.webBrowser.openWithURL', 'webBrowser.openWithURL', (['"""www.desicos.eu"""'], {}), "('www.desicos.eu')\n", (70479, 70497), False, 'from uti import webBrowser\n'), ((59587, 59595), 'desicos.abaqus.utils.remove_special_characters', 'rsc', (['tmp'], {}), '(tmp)\n', 
(59590, 59595), True, 'from desicos.abaqus.utils import remove_special_characters as rsc\n'), ((60130, 60138), 'desicos.abaqus.utils.remove_special_characters', 'rsc', (['tmp'], {}), '(tmp)\n', (60133, 60138), True, 'from desicos.abaqus.utils import remove_special_characters as rsc\n'), ((60713, 60721), 'desicos.abaqus.utils.remove_special_characters', 'rsc', (['tmp'], {}), '(tmp)\n', (60716, 60721), True, 'from desicos.abaqus.utils import remove_special_characters as rsc\n'), ((63185, 63228), 'gui_commands.load_study_gui', 'gui_commands.load_study_gui', (['std_name', 'form'], {}), '(std_name, form)\n', (63212, 63228), False, 'import gui_commands\n'), ((63406, 63437), 'os.path.join', 'os.path.join', (['TMP_DIR', 'std_name'], {}), '(TMP_DIR, std_name)\n', (63418, 63437), False, 'import os\n'), ((63733, 63755), 'os.path.isdir', 'os.path.isdir', (['outputs'], {}), '(outputs)\n', (63746, 63755), False, 'import os\n'), ((63773, 63793), 'os.makedirs', 'os.makedirs', (['outputs'], {}), '(outputs)\n', (63784, 63793), False, 'import os\n'), ((66025, 66070), 'gui_plot.plot_opened_conecyl', 'gui_plot.plot_opened_conecyl', ([], {'plot_type': '(i + 1)'}), '(plot_type=i + 1)\n', (66053, 66070), False, 'import gui_plot\n'), ((66306, 66337), 'os.path.join', 'os.path.join', (['TMP_DIR', 'std_name'], {}), '(TMP_DIR, std_name)\n', (66318, 66337), False, 'import os\n')] |
# Copyright (c) <NAME>, <NAME>, and ZOZO Technologies, Inc. All rights reserved.
# Licensed under the Apache 2.0 License.
"""Dataset Class for Real-World Logged Bandit Feedback."""
from dataclasses import dataclass
from logging import getLogger, basicConfig, INFO
from pathlib import Path
from typing import Optional
import numpy as np
import pandas as pd
from scipy.stats import rankdata
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import check_random_state
from .base import BaseRealBanditDataset
from ..types import BanditFeedback
# Module-level logger for dataset loading messages.
logger = getLogger(__name__)
# NOTE(review): calling basicConfig() at import time configures the root
# logger as a side effect of importing this module; confirm this is
# intended for a library module.
basicConfig(level=INFO)
@dataclass
class OpenBanditDataset(BaseRealBanditDataset):
    """Class for loading and preprocessing Open Bandit Dataset.

    Note
    -----
    Users are free to implement their own feature engineering by overriding
    the `pre_process` method.

    Parameters
    -----------
    behavior_policy: str
        Name of the behavior policy that generated the logged bandit feedback data.
        Must be either 'random' or 'bts'.

    campaign: str
        One of the three possible campaigns considered in ZOZOTOWN, "all", "men", and "women".

    data_path: Path, default=None
        Path where the Open Bandit Dataset exists.
        When `None` is given, this class downloads the example small-sized version of the dataset.

    dataset_name: str, default='obd'
        Name of the dataset.

    References
    ------------
    <NAME>, <NAME>, <NAME>, <NAME>.
    "Open Bandit Dataset and Pipeline: Towards Realistic and Reproducible Off-Policy Evaluation.", 2020.

    """

    behavior_policy: str
    campaign: str
    data_path: Optional[Path] = None
    dataset_name: str = "obd"

    def __post_init__(self) -> None:
        """Initialize Open Bandit Dataset Class.

        Validates `behavior_policy` and `campaign`, resolves `data_path`
        (falling back to the bundled small-sized dataset), then loads and
        preprocesses the raw data.
        """
        if self.behavior_policy not in [
            "bts",
            "random",
        ]:
            raise ValueError(
                f"behavior_policy must be either of 'bts' or 'random', but {self.behavior_policy} is given"
            )
        if self.campaign not in [
            "all",
            "men",
            "women",
        ]:
            raise ValueError(
                f"campaign must be one of 'all', 'men', and 'women', but {self.campaign} is given"
            )
        if self.data_path is None:
            logger.info(
                "When `data_path` is not given, this class downloads the example small-sized version of the Open Bandit Dataset."
            )
            self.data_path = Path(__file__).parent / "obd"
        else:
            if not isinstance(self.data_path, Path):
                raise ValueError("data_path must be a Path type")
        self.data_path = self.data_path / self.behavior_policy / self.campaign
        self.raw_data_file = f"{self.campaign}.csv"

        self.load_raw_data()
        self.pre_process()

    @property
    def n_rounds(self) -> int:
        """Total number of rounds contained in the logged bandit dataset."""
        return self.data.shape[0]

    @property
    def n_actions(self) -> int:
        """Number of actions."""
        return int(self.action.max() + 1)

    @property
    def dim_context(self) -> int:
        """Dimensions of context vectors."""
        return self.context.shape[1]

    @property
    def len_list(self) -> int:
        """Length of recommendation lists."""
        return int(self.position.max() + 1)

    @classmethod
    def calc_on_policy_policy_value_estimate(
        cls,
        behavior_policy: str,
        campaign: str,
        data_path: Optional[Path] = None,
        test_size: float = 0.3,
        is_timeseries_split: bool = False,
    ) -> float:
        """Calculate on-policy policy value estimate (used as a ground-truth policy value).

        Parameters
        ----------
        behavior_policy: str
            Name of the behavior policy that generated the log data.
            Must be either 'random' or 'bts'.

        campaign: str
            One of the three possible campaigns considered in ZOZOTOWN (i.e., "all", "men", and "women").

        data_path: Path, default=None
            Path where the Open Bandit Dataset exists.
            When `None` is given, this class downloads the example small-sized version of the dataset.

        test_size: float, default=0.3
            If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to include in the test split.

        is_timeseries_split: bool, default=False
            If true, split the original logged bandit feedback data by time series.

        Returns
        ---------
        on_policy_policy_value_estimate: float
            Policy value of the behavior policy estimated by on-policy estimation, i.e., :math:`\\mathbb{E}_{\\mathcal{D}} [r_t]`.
            where :math:`\\mathbb{E}_{\\mathcal{D}}[\\cdot]` is the empirical average over :math:`T` observations in :math:`\\mathcal{D}`.
            This parameter is used as a ground-truth policy value in the evaluation of OPE estimators.

        """
        return (
            cls(behavior_policy=behavior_policy, campaign=campaign, data_path=data_path)
            .obtain_batch_bandit_feedback(
                test_size=test_size, is_timeseries_split=is_timeseries_split
            )["reward_test"]
            .mean()
        )

    def load_raw_data(self) -> None:
        """Load raw open bandit dataset."""
        self.data = pd.read_csv(self.data_path / self.raw_data_file, index_col=0)
        self.item_context = pd.read_csv(
            self.data_path / "item_context.csv", index_col=0
        )
        self.data.sort_values("timestamp", inplace=True)
        self.action = self.data["item_id"].values
        # Dense ranking maps raw slot positions to consecutive integers;
        # subtract one so positions are zero-based.
        self.position = (rankdata(self.data["position"].values, "dense") - 1).astype(
            int
        )
        self.reward = self.data["click"].values
        self.pscore = self.data["propensity_score"].values

    def pre_process(self) -> None:
        """Preprocess raw open bandit dataset.

        Note
        -----
        This is the default feature engineering and please override this method to
        implement your own preprocessing.
        see https://github.com/st-tech/zr-obp/blob/master/examples/examples_with_obd/custom_dataset.py for example.

        """
        user_cols = self.data.columns.str.contains("user_feature")
        self.context = pd.get_dummies(
            self.data.loc[:, user_cols], drop_first=True
        ).values
        item_feature_0 = self.item_context["item_feature_0"]
        # Use the explicit `axis` keyword: positional axis arguments to
        # `DataFrame.drop` and `pd.concat` were deprecated and removed in
        # pandas 2.0.
        item_feature_cat = self.item_context.drop("item_feature_0", axis=1).apply(
            LabelEncoder().fit_transform
        )
        self.action_context = pd.concat(
            [item_feature_cat, item_feature_0], axis=1
        ).values

    def obtain_batch_bandit_feedback(
        self, test_size: float = 0.3, is_timeseries_split: bool = False
    ) -> BanditFeedback:
        """Obtain batch logged bandit feedback.

        Parameters
        -----------
        test_size: float, default=0.3
            If float, should be between 0.0 and 1.0 and represent the proportion of
            the dataset to include in the evaluation split.
            This argument matters only when `is_timeseries_split=True` (the out-sample case).

        is_timeseries_split: bool, default=False
            If true, split the original logged bandit feedback data by time series.

        Returns
        --------
        bandit_feedback: BanditFeedback
            A dictionary containing batch logged bandit feedback data collected by a behavior policy.
            The keys of the dictionary are as follows.

            - n_rounds: number of rounds (size) of the logged bandit data
            - n_actions: number of actions (:math:`|\\mathcal{A}|`)
            - action: action variables sampled by a behavior policy
            - position: positions where actions are recommended
            - reward: reward variables
            - pscore: action choice probabilities by a behavior policy
            - context: context vectors such as user-related features and user-item affinity scores
            - action_context: item-related context vectors

        """
        if is_timeseries_split:
            if not isinstance(test_size, float) or (test_size <= 0 or test_size >= 1):
                raise ValueError(
                    f"test_size must be a float in the (0,1) interval, but {test_size} is given"
                )
            # np.int was removed in NumPy 1.24; the builtin int is equivalent.
            n_rounds_train = int(self.n_rounds * (1.0 - test_size))
            return dict(
                n_rounds=n_rounds_train,
                n_actions=self.n_actions,
                action=self.action[:n_rounds_train],
                action_test=self.action[n_rounds_train:],
                position=self.position[:n_rounds_train],
                position_test=self.position[n_rounds_train:],
                reward=self.reward[:n_rounds_train],
                reward_test=self.reward[n_rounds_train:],
                pscore=self.pscore[:n_rounds_train],
                pscore_test=self.pscore[n_rounds_train:],
                context=self.context[:n_rounds_train],
                context_test=self.context[n_rounds_train:],
                action_context=self.action_context,
            )
        else:
            return dict(
                n_rounds=self.n_rounds,
                n_actions=self.n_actions,
                action=self.action,
                position=self.position,
                reward=self.reward,
                reward_test=self.reward,
                pscore=self.pscore,
                context=self.context,
                action_context=self.action_context,
            )

    def sample_bootstrap_bandit_feedback(
        self,
        test_size: float = 0.3,
        is_timeseries_split: bool = False,
        random_state: Optional[int] = None,
    ) -> BanditFeedback:
        """Obtain bootstrap logged bandit feedback.

        Parameters
        -----------
        test_size: float, default=0.3
            If float, should be between 0.0 and 1.0 and represent the proportion of
            the dataset to include in the evaluation split.
            This argument matters only when `is_timeseries_split=True` (the out-sample case).

        is_timeseries_split: bool, default=False
            If true, split the original logged bandit feedback data by time series.

        random_state: int, default=None
            Controls the random seed in bootstrap sampling.

        Returns
        --------
        bandit_feedback: BanditFeedback
            A dictionary containing logged bandit feedback data sampled independently from the original data with replacement.
            The keys of the dictionary are as follows.

            - n_rounds: number of rounds (size) of the logged bandit data
            - n_actions: number of actions
            - action: action variables sampled by a behavior policy
            - position: positions where actions are recommended by a behavior policy
            - reward: reward variables
            - pscore: action choice probabilities by a behavior policy
            - context: context vectors such as user-related features and user-item affinity scores
            - action_context: item-related context vectors

        """
        bandit_feedback = self.obtain_batch_bandit_feedback(
            test_size=test_size, is_timeseries_split=is_timeseries_split
        )
        n_rounds = bandit_feedback["n_rounds"]
        random_ = check_random_state(random_state)
        # Resample row indices with replacement (classic bootstrap).
        bootstrap_idx = random_.choice(np.arange(n_rounds), size=n_rounds, replace=True)
        for key_ in ["action", "position", "reward", "pscore", "context"]:
            bandit_feedback[key_] = bandit_feedback[key_][bootstrap_idx]
        return bandit_feedback
| [
"sklearn.utils.check_random_state",
"logging.basicConfig",
"pandas.read_csv",
"pandas.get_dummies",
"scipy.stats.rankdata",
"sklearn.preprocessing.LabelEncoder",
"pathlib.Path",
"numpy.int",
"numpy.arange",
"pandas.concat",
"logging.getLogger"
] | [((569, 588), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (578, 588), False, 'from logging import getLogger, basicConfig, INFO\n'), ((589, 612), 'logging.basicConfig', 'basicConfig', ([], {'level': 'INFO'}), '(level=INFO)\n', (600, 612), False, 'from logging import getLogger, basicConfig, INFO\n'), ((5435, 5496), 'pandas.read_csv', 'pd.read_csv', (['(self.data_path / self.raw_data_file)'], {'index_col': '(0)'}), '(self.data_path / self.raw_data_file, index_col=0)\n', (5446, 5496), True, 'import pandas as pd\n'), ((5525, 5586), 'pandas.read_csv', 'pd.read_csv', (["(self.data_path / 'item_context.csv')"], {'index_col': '(0)'}), "(self.data_path / 'item_context.csv', index_col=0)\n", (5536, 5586), True, 'import pandas as pd\n'), ((11489, 11521), 'sklearn.utils.check_random_state', 'check_random_state', (['random_state'], {}), '(random_state)\n', (11507, 11521), False, 'from sklearn.utils import check_random_state\n'), ((6390, 6450), 'pandas.get_dummies', 'pd.get_dummies', (['self.data.loc[:, user_cols]'], {'drop_first': '(True)'}), '(self.data.loc[:, user_cols], drop_first=True)\n', (6404, 6450), True, 'import pandas as pd\n'), ((6700, 6748), 'pandas.concat', 'pd.concat', (['[item_feature_cat, item_feature_0]', '(1)'], {}), '([item_feature_cat, item_feature_0], 1)\n', (6709, 6748), True, 'import pandas as pd\n'), ((8472, 8513), 'numpy.int', 'np.int', (['(self.n_rounds * (1.0 - test_size))'], {}), '(self.n_rounds * (1.0 - test_size))\n', (8478, 8513), True, 'import numpy as np\n'), ((11561, 11580), 'numpy.arange', 'np.arange', (['n_rounds'], {}), '(n_rounds)\n', (11570, 11580), True, 'import numpy as np\n'), ((6631, 6645), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (6643, 6645), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((2520, 2534), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (2524, 2534), False, 'from pathlib import Path\n'), ((5741, 5788), 'scipy.stats.rankdata', 
'rankdata', (["self.data['position'].values", '"""dense"""'], {}), "(self.data['position'].values, 'dense')\n", (5749, 5788), False, 'from scipy.stats import rankdata\n')] |
# example of calculating the frechet inception distance in Keras
import numpy
import glob
import os
from skimage.measure import compare_ssim
from PIL import Image
import cv2
import numpy as np
# calculate SSIM
def calculate_average_ssim(images1, images2):
    """Return the mean and standard deviation of per-pair SSIM scores.

    Parameters
    ----------
    images1, images2 : sequence of ndarray
        Paired multichannel (RGB) images; both sequences are assumed to
        have the same length.

    Returns
    -------
    tuple of (float, float)
        Mean and standard deviation of the SSIM of each image pair.
    """
    ssims = []
    for img_a, img_b in zip(images1, images2):
        # compare_ssim(..., full=True) returns (score, full_ssim_image);
        # only the scalar score is kept here.
        score, _ = compare_ssim(img_a, img_b, gaussian_weights=True, full=True,
                                sigma=1.5, use_sample_covariance=False,
                                multichannel=True)
        ssims.append(score)
    ssims = np.array(ssims)
    return np.mean(ssims), np.std(ssims)
def _load_rgb(path):
    """Read the image at *path* with OpenCV and return it in RGB order.

    cv2.imread returns BGR channel order, so convert before comparing.
    """
    img = cv2.imread(path)
    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)


folder = "F:/Datasets/CRAG_LabServer/Test/Grades/1/1200_cropped/results_run4/images"
paths = glob.glob(os.path.join(folder, "*.png"))

# Each generated image is named "<base>-outputs.png" and its ground truth
# "<base>-targets.png"; collect the base names from the generated files.
image_names = []
for path in paths:
    if 'outputs' in path:
        parts = os.path.split(path)[1].split("-")
        image_names.append("-".join([parts[0], parts[1]]))
print(image_names)

original_images = [_load_rgb(os.path.join(folder, name + "-targets.png"))
                   for name in image_names]
generated_images = [_load_rgb(os.path.join(folder, name + "-outputs.png"))
                    for name in image_names]
print(len(original_images))
print(len(generated_images))

# Average SSIM between ground-truth and generated images.
ssim_avg, ssim_std = calculate_average_ssim(original_images, generated_images)
print("Average SSIM => ", ssim_avg)
print("STD SSIM => ", ssim_std)
"skimage.measure.compare_ssim",
"cv2.cvtColor",
"numpy.std",
"numpy.mean",
"numpy.array",
"os.path.split",
"os.path.join"
] | [((539, 559), 'numpy.array', 'np.array', (['ssims_list'], {}), '(ssims_list)\n', (547, 559), True, 'import numpy as np\n'), ((739, 768), 'os.path.join', 'os.path.join', (['folder', '"""*.png"""'], {}), "(folder, '*.png')\n", (751, 768), False, 'import os\n'), ((1362, 1401), 'cv2.cvtColor', 'cv2.cvtColor', (['or_img', 'cv2.COLOR_BGR2RGB'], {}), '(or_img, cv2.COLOR_BGR2RGB)\n', (1374, 1401), False, 'import cv2\n'), ((1602, 1641), 'cv2.cvtColor', 'cv2.cvtColor', (['gn_img', 'cv2.COLOR_BGR2RGB'], {}), '(gn_img, cv2.COLOR_BGR2RGB)\n', (1614, 1641), False, 'import cv2\n'), ((360, 493), 'skimage.measure.compare_ssim', 'compare_ssim', (['images1[i]', 'images2[i]'], {'gaussian_weights': '(True)', 'full': '(True)', 'sigma': '(1.5)', 'use_sample_covariance': '(False)', 'multichannel': '(True)'}), '(images1[i], images2[i], gaussian_weights=True, full=True,\n sigma=1.5, use_sample_covariance=False, multichannel=True)\n', (372, 493), False, 'from skimage.measure import compare_ssim\n'), ((575, 594), 'numpy.mean', 'np.mean', (['ssims_list'], {}), '(ssims_list)\n', (582, 594), True, 'import numpy as np\n'), ((595, 613), 'numpy.std', 'np.std', (['ssims_list'], {}), '(ssims_list)\n', (601, 613), True, 'import numpy as np\n'), ((1317, 1348), 'os.path.join', 'os.path.join', (['folder', 'or_imname'], {}), '(folder, or_imname)\n', (1329, 1348), False, 'import os\n'), ((1556, 1587), 'os.path.join', 'os.path.join', (['folder', 'gn_imname'], {}), '(folder, gn_imname)\n', (1568, 1587), False, 'import os\n'), ((894, 913), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (907, 913), False, 'import os\n')] |
import pandas as pd
import numpy as np
from sklearn import ensemble
from sklearn import metrics
from sklearn import model_selection
from functools import partial
from sklearn import decomposition
from sklearn import pipeline
from sklearn import preprocessing
import optuna
def optimize(trial, X, y):
    """Optuna objective: negative mean 5-fold CV accuracy of a random forest.

    Args:
        trial: optuna trial used to sample the hyperparameters.
        X: feature matrix.
        y: label vector.

    Returns:
        A one-tuple-compatible float: the negative mean accuracy across the
        stratified folds, so a *minimizing* study maximizes accuracy.
    """
    # Sample this trial's hyperparameter configuration (same order as before
    # so the trial's random stream is consumed identically).
    params = {
        "criterion": trial.suggest_categorical("criterion", ["gini", "entropy"]),
        "n_estimators": trial.suggest_int("n_estimators", 100, 500),
        "max_depth": trial.suggest_int("max_depth", 3, 10),
        "max_features": trial.suggest_uniform("max_features", 0.01, 1.0),
    }
    model = ensemble.RandomForestClassifier(**params)

    splitter = model_selection.StratifiedKFold(n_splits=5)
    fold_accuracies = []
    for train_idx, valid_idx in splitter.split(X=X, y=y):
        # Refit on the training fold, score on the held-out fold.
        model.fit(X[train_idx], y[train_idx])
        predictions = model.predict(X[valid_idx])
        fold_accuracies.append(metrics.accuracy_score(y[valid_idx], predictions))
    return -1.0 * np.mean(fold_accuracies)
if __name__ == "__main__":
    # Load the training data and separate features from the target column.
    df = pd.read_csv("../data/train.csv")
    features = df.drop(["price_range"], axis=1).values
    labels = df["price_range"].values

    # Freeze the data into the objective and minimize (maximizing accuracy).
    objective = partial(optimize, X=features, y=labels)
    study = optuna.create_study(direction="minimize")
    study.optimize(objective, n_trials=15)
| [
"sklearn.ensemble.RandomForestClassifier",
"functools.partial",
"pandas.read_csv",
"sklearn.metrics.accuracy_score",
"numpy.mean",
"sklearn.model_selection.StratifiedKFold",
"optuna.create_study"
] | [((578, 710), 'sklearn.ensemble.RandomForestClassifier', 'ensemble.RandomForestClassifier', ([], {'n_estimators': 'n_estimators', 'max_depth': 'max_depth', 'max_features': 'max_features', 'criterion': 'criterion'}), '(n_estimators=n_estimators, max_depth=\n max_depth, max_features=max_features, criterion=criterion)\n', (609, 710), False, 'from sklearn import ensemble\n'), ((715, 758), 'sklearn.model_selection.StratifiedKFold', 'model_selection.StratifiedKFold', ([], {'n_splits': '(5)'}), '(n_splits=5)\n', (746, 758), False, 'from sklearn import model_selection\n'), ((1223, 1255), 'pandas.read_csv', 'pd.read_csv', (['"""../data/train.csv"""'], {}), "('../data/train.csv')\n", (1234, 1255), True, 'import pandas as pd\n'), ((1366, 1393), 'functools.partial', 'partial', (['optimize'], {'X': 'X', 'y': 'y'}), '(optimize, X=X, y=y)\n', (1373, 1393), False, 'from functools import partial\n'), ((1406, 1447), 'optuna.create_study', 'optuna.create_study', ([], {'direction': '"""minimize"""'}), "(direction='minimize')\n", (1425, 1447), False, 'import optuna\n'), ((1068, 1104), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['ytest', 'preds'], {}), '(ytest, preds)\n', (1090, 1104), False, 'from sklearn import metrics\n'), ((1164, 1183), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (1171, 1183), True, 'import numpy as np\n')] |
import numpy as np
# noinspection PyPep8Naming
import torch.nn.functional as F
import torch.nn as nn
import torch
from lib.distributions import log_standard_normal
from lib.flows import cpflows
from lib.made import MADE, CMADE
from lib.naf import sigmoid_flow
# Floor added to every learned scale so it stays strictly positive,
# keeping the affine transforms invertible.
_scaling_min = 0.001
# noinspection PyUnusedLocal
class ActNorm(torch.nn.Module):
    """ActNorm layer with data-dependant init.

    Keeps a per-feature bias ``b`` and (optionally) log-scale ``logs``.
    On the first forward pass the parameters are initialised from the batch
    so the output has zero mean and roughly unit variance per feature.
    """
    def __init__(self, num_features, logscale_factor=1., scale=1., learn_scale=True):
        super(ActNorm, self).__init__()
        # Flipped to True by the first forward pass (data-dependent init).
        self.initialized = False
        self.num_features = num_features

        # Per-feature additive bias, broadcast over batch and locations.
        self.register_parameter('b', nn.Parameter(torch.zeros(1, num_features, 1), requires_grad=True))
        self.learn_scale = learn_scale
        if learn_scale:
            self.logscale_factor = logscale_factor
            self.scale = scale
            # Per-feature log-scale, stored divided by logscale_factor.
            self.register_parameter('logs', nn.Parameter(torch.zeros(1, num_features, 1), requires_grad=True))

    def forward_transform(self, x, logdet=0):
        """Normalise ``x``; returns (output, logdet + log|det J|)."""
        input_shape = x.size()
        # Flatten trailing dims so x is (batch, features, locations).
        x = x.view(input_shape[0], input_shape[1], -1)

        if not self.initialized:
            self.initialized = True

            # noinspection PyShadowingNames
            def unsqueeze(x):
                # Reshape a per-feature vector to (1, features, 1) for broadcast.
                return x.unsqueeze(0).unsqueeze(-1).detach()

            # Compute the mean and variance over batch and locations.
            sum_size = x.size(0) * x.size(-1)
            b = -torch.sum(x, dim=(0, -1)) / sum_size
            self.b.data.copy_(unsqueeze(b).data)

            if self.learn_scale:
                var = unsqueeze(torch.sum((x + unsqueeze(b)) ** 2, dim=(0, -1)) / sum_size)
                # 1e-6 guards against zero-variance features.
                logs = torch.log(self.scale / (torch.sqrt(var) + 1e-6)) / self.logscale_factor
                self.logs.data.copy_(logs.data)

        b = self.b
        output = x + b

        if self.learn_scale:
            logs = self.logs * self.logscale_factor
            # _scaling_min keeps the scale strictly positive (invertibility).
            scale = torch.exp(logs) + _scaling_min
            output = output * scale
            # The same scale applies at every location, hence * x.size(-1).
            dlogdet = torch.sum(torch.log(scale)) * x.size(-1)  # features x locations
            return output.view(input_shape), logdet + dlogdet
        else:
            return output.view(input_shape), logdet

    def reverse(self, y, **kwargs):
        """Invert forward_transform: x = y / scale - b.

        NOTE(review): reads ``self.logs``, which only exists when
        ``learn_scale=True`` — confirm reverse is never used otherwise.
        """
        assert self.initialized
        input_shape = y.size()
        y = y.view(input_shape[0], input_shape[1], -1)
        logs = self.logs * self.logscale_factor
        b = self.b
        scale = torch.exp(logs) + _scaling_min
        x = y / scale - b
        return x.view(input_shape)

    def extra_repr(self):
        return f"{self.num_features}"
# noinspection PyUnusedLocal
class LayerActnorm(torch.nn.Module):
    """ActNorm applied across a temporary singleton channel axis.

    The input gains a dummy channel at position 1, is normalised by a
    single-feature ActNorm, and the dummy channel is removed again.
    """
    def __init__(self):
        super(LayerActnorm, self).__init__()
        self.flow = SequentialFlow([Unsqueeze(1), ActNorm(1), Squeeze(1)])

    def forward_transform(self, x, logdet=0):
        # Delegate to the three-step flow; no context is needed here.
        return self.flow.forward_transform(x, logdet, None)

    def reverse(self, y, **kargs):
        return self.flow.reverse(y)
class ActNormNoLogdet(ActNorm):
    """ActNorm exposed through a plain ``forward`` that discards the logdet."""
    def forward(self, x):
        normalized, _logdet = super(ActNormNoLogdet, self).forward_transform(x)
        return normalized
# noinspection PyUnusedLocal
class Unsqueeze(torch.nn.Module):
    """Bijection that inserts a singleton axis at ``dim``.

    Pure reshaping, so the log-determinant passes through unchanged.
    """
    def __init__(self, dim):
        super(Unsqueeze, self).__init__()
        self.dim = dim

    def forward_transform(self, x, logdet=0):
        return torch.unsqueeze(x, self.dim), logdet

    def reverse(self, x, **kargs):
        return torch.squeeze(x, self.dim)
# noinspection PyUnusedLocal
class Squeeze(torch.nn.Module):
    """Bijection that removes the singleton axis at ``dim``.

    Pure reshaping, so the log-determinant passes through unchanged.
    """
    def __init__(self, dim):
        super(Squeeze, self).__init__()
        self.dim = dim

    def forward_transform(self, x, logdet=0):
        return torch.squeeze(x, self.dim), logdet

    def reverse(self, x, **kargs):
        return torch.unsqueeze(x, self.dim)
# noinspection PyPep8Naming
class SequentialFlow(torch.nn.Module):
    """Composition of flow layers applied in sequence.

    When ``extra`` is given as a one-element list, detached per-layer
    log-determinant (and base log-prob) contributions are accumulated into
    ``extra[0]`` for logging/diagnostics.
    """
    def __init__(self, flows):
        super(SequentialFlow, self).__init__()
        self.flows = torch.nn.ModuleList(flows)

    def forward_transform(self, x, logdet=0, context=None, extra=None,itr =0):
        """Apply every flow in order; returns (z, accumulated logdet)."""
        for flow in self.flows:
            # Only conditional flows (deep convex flows and NAF-DSF) receive
            # the context and iteration number.
            if isinstance(flow, cpflows.DeepConvexFlow) or isinstance(flow, NAFDSF):
                x, logdet = flow.forward_transform(x, logdet,
                                                   context=context,
                                                   extra=extra, itr =itr)
            else:
                prev_logdet = logdet
                x, logdet = flow.forward_transform(x, logdet)
                if extra is not None and len(extra) > 0:
                    # Record this layer's detached logdet contribution.
                    extra[0] = extra[0] + (logdet - prev_logdet).detach()
        return x, logdet

    def reverse(self, x, **kwargs):
        """Invert the chain by reversing each flow in opposite order."""
        # noinspection PyTypeChecker
        for flow in self.flows[::-1]:
            x = flow.reverse(x, **kwargs)
        return x

    def logp(self, x, context=None, extra=None, itr =0):
        """Log-density of ``x`` under the flow with a standard-normal base."""
        z, logdet = self.forward_transform(x, context=context, extra=extra, itr = itr)
        logp0 = log_standard_normal(z).sum(-1)
        if extra is not None and len(extra) > 0:
            extra[0] = extra[0] + logp0.detach()
        return logp0 + logdet

    def plot_logp(self, b=5, n=100):
        """Render the 2D density on [-b, b]^2 over an n x n grid (Agg backend)."""
        import matplotlib.pyplot as plt
        import matplotlib
        matplotlib.use('Agg')
        x1 = torch.linspace(-b, b, n)
        x2 = torch.linspace(-b, b, n)
        X2, X1 = torch.meshgrid(x1, x2)
        data = torch.cat([X1.flatten().unsqueeze(1), X2.flatten().unsqueeze(1)], 1)
        if torch.cuda.is_available():
            data = data.cuda()
        p = torch.exp(self.logp(data).cpu()).data.numpy()
        # Flip the rows so the y-axis points upward in the rendered image.
        plt.imshow(p.reshape(n, n)[::-1], interpolation='gaussian')
        plt.axis('off')
class Reverse(nn.Module):
    """Flow adapter that swaps the two directions of a wrapped flow."""
    def __init__(self, flow):
        super().__init__()
        self.flow = flow

    def forward_transform(self, *args, **kwargs):
        # The wrapper's forward direction is the wrapped flow's inverse.
        return self.flow.reverse(*args, **kwargs)

    def reverse(self, *args, **kwargs):
        return self.flow.forward_transform(*args, **kwargs)
# noinspection PyMethodMayBeStatic,PyUnusedLocal
class Flatten(nn.Module):
    """Bijectively flattens inputs of ``shape`` to vectors and back.

    Pure reshaping, so any supplied logdet is returned untouched.
    """
    def __init__(self, shape):
        super().__init__()
        self.shape = shape

    def forward_transform(self, x, logdet=None, **kwargs):
        flat = x.reshape(x.shape[0], -1)
        return flat if logdet is None else (flat, logdet)

    def reverse(self, flat_x, logdet=None, **kwargs):
        restored = flat_x.reshape(flat_x.shape[0], *self.shape)
        return restored if logdet is None else (restored, logdet)

    def extra_repr(self):
        return f"original shape={self.shape}"
# noinspection PyUnusedLocal
class SqueezeLayer(nn.Module):
    """Space-to-depth bijection with factor ``downscale_factor``.

    Pure reshuffling of entries, so any supplied logdet passes through.
    """
    def __init__(self, downscale_factor):
        super(SqueezeLayer, self).__init__()
        self.downscale_factor = downscale_factor

    def forward_transform(self, x, logdet=None, **kwargs):
        out = squeeze(x, self.downscale_factor)
        return out if logdet is None else (out, logdet)

    def reverse(self, y, logdet=None, **kwargs):
        out = unsqueeze(y, self.downscale_factor)
        return out if logdet is None else (out, logdet)
def unsqueeze(x, upscale_factor=2):
    """
    Depth-to-space: [:, C*r^2, H, W] -> [:, C, H*r, W*r] (inverse of ``squeeze``).
    """
    return F.pixel_shuffle(x, upscale_factor)
def squeeze(x, downscale_factor=2):
    """
    Space-to-depth: [:, C, H*r, W*r] -> [:, C*r^2, H, W] (inverse of pixel shuffle).
    """
    n, c, h, w = x.shape
    r = downscale_factor
    # Split each spatial axis into (coarse, fine) factors, then fold the two
    # fine factors into the channel axis.
    folded = x.reshape(n, c, h // r, r, w // r, r)
    folded = folded.permute(0, 1, 3, 5, 2, 4)
    return folded.reshape(n, c * r * r, h // r, w // r)
# noinspection PyUnusedLocal
class InvertibleLinear(nn.Module):
    """Invertible dense layer initialised as a random permutation matrix."""
    def __init__(self, dim):
        super(InvertibleLinear, self).__init__()
        self.dim = dim
        # A random permutation of the identity is orthogonal, hence trivially
        # invertible with log|det| = 0 at initialisation.
        perm = torch.randperm(dim)
        self.weight = nn.Parameter(torch.eye(dim)[perm])

    def forward_transform(self, x, logdet=None, **kwargs):
        y = F.linear(x, self.weight)
        if logdet is None:
            return y
        return y, logdet + self._logdetgrad

    def reverse(self, y, **kwargs):
        # Invert with the explicit inverse of the weight matrix.
        return F.linear(y, self.weight.inverse())

    @property
    def _logdetgrad(self):
        # slogdet returns (sign, log|det|); only the magnitude matters here.
        return torch.slogdet(self.weight)[1]

    def extra_repr(self):
        return 'dim={}'.format(self.dim)
# noinspection PyUnusedLocal,PyPep8Naming
class Invertible1x1Conv(nn.Module):
    """Invertible 1x1 convolution with an LU-parameterised kernel (Glow-style).

    The dim x dim kernel is stored as W = P @ L @ U where P is a fixed
    permutation buffer, L is unit lower triangular and U is upper triangular;
    the (signed, log-scaled) diagonal of U is kept in separate parameters so
    that log|det W| is just ``sum(s_log)``.
    """
    def __init__(self, dim):
        super(Invertible1x1Conv, self).__init__()
        self.dim = dim
        # Grab the weight from a randomly initialized Conv2d kernel.
        m = nn.Conv2d(dim, dim, kernel_size=1)
        W = m.weight.clone().detach().reshape(dim, dim)
        # torch.lu / torch.lu_unpack are deprecated (and removed in newer
        # releases); torch.linalg.lu returns P, L, U directly (W = P @ L @ U).
        P, L, U = torch.linalg.lu(W)
        s = torch.diagonal(U)
        # Store L and U compactly in one matrix with a zeroed diagonal; the
        # diagonal of U lives in s_sign / s_log instead.
        LU = torch.tril(L, -1) + torch.triu(U, 1)
        self.register_buffer("P", P)
        self.register_buffer("s_sign", torch.sign(s))
        # The 1e-3 epsilon keeps the log finite for near-zero pivots.
        self.register_parameter("s_log", nn.Parameter(torch.log(torch.abs(s) + 1e-3)))
        self.register_parameter("LU", nn.Parameter(LU))

    @property
    def weight(self):
        # Rebuild W = P @ L @ U from the compact parameterisation.
        L = torch.tril(self.LU, -1) + torch.eye(self.dim).to(self.LU)
        U = torch.triu(self.LU, 1) + torch.diagflat(torch.exp(self.s_log) * self.s_sign)
        return torch.mm(self.P, torch.mm(L, U))

    def forward_transform(self, x, logdet=None, **kwargs):
        y = F.conv2d(x, self.weight.view(self.dim, self.dim, 1, 1))
        if logdet is None:
            return y
        else:
            # log|det| of a 1x1 conv scales with the number of spatial positions.
            return y, logdet + self._logdetgrad.expand_as(logdet) * x.shape[2] * x.shape[3]

    def reverse(self, y, **kwargs):
        return F.conv2d(y, self.weight.inverse().view(self.dim, self.dim, 1, 1))

    @property
    def _logdetgrad(self):
        # P is a permutation and L is unit triangular, so
        # log|det W| = sum(log|diag(U)|) = sum(s_log).
        return torch.sum(self.s_log)

    def extra_repr(self):
        return 'dim={}'.format(self.dim)
# noinspection PyUnusedLocal
class LinearIAF(nn.Module):
    """Affine autoregressive flow whose conditioner is a linear (depth-0) MADE."""
    def __init__(self, dim, natural_ordering=True):
        super(LinearIAF, self).__init__()
        # A MADE with no hidden layers and identity activation gives a purely
        # linear autoregressive conditioner emitting shift and pre-scale.
        self.made = MADE(dim, [], dim*2, num_masks=1, natural_ordering=natural_ordering, activation=torch.nn.Identity)
        # Initialise near the identity map: tiny weights, zero shifts, and
        # scale biases of log(e - 1) so that softplus(bias) == 1.
        self.made.net[-1].weight.data.uniform_(-0.001, 0.001)
        self.made.net[-1].bias[:dim].data.zero_()
        self.made.net[-1].bias[dim:].data.zero_().add_(np.log(np.exp(1) - 1))

    def forward(self, x):
        return self.forward_transform(x)

    def forward_transform(self, x, logdet=None, **kwargs):
        shift, pre_scale = torch.chunk(self.made(x), 2, 1)
        scale = torch.nn.functional.softplus(pre_scale)
        y = shift + scale * x
        if logdet is None:
            return y
        # 1e-8 keeps the log finite if softplus ever underflows.
        return y, logdet + torch.log(scale + 1e-8).sum(1)
# noinspection PyUnusedLocal
class IAF(nn.Module):
    """Affine autoregressive flow with a MADE MLP conditioner."""
    def __init__(self, dim, dimh=16, num_hidden_layers=2, natural_ordering=True, activation=torch.nn.ReLU()):
        super(IAF, self).__init__()
        self.dim = dim
        self.dimh = dimh
        self.num_hidden_layers = num_hidden_layers
        # MADE emits 2*dim outputs: a shift and a pre-softplus scale per input.
        self.made = MADE(dim, [dimh] * num_hidden_layers, dim*2, num_masks=1,
                         natural_ordering=natural_ordering, activation=activation)
        # Initialise near the identity map: tiny weights, zero shifts, and
        # scale biases of log(e - 1) so that softplus(bias) == 1.
        self.made.net[-1].weight.data.uniform_(-0.001, 0.001)
        self.made.net[-1].bias[:dim].data.zero_()
        self.made.net[-1].bias[dim:].data.zero_().add_(np.log(np.exp(1) - 1))

    def forward(self, x):
        return self.forward_transform(x)

    def forward_transform(self, x, logdet=None, **kwargs):
        shift, pre_scale = torch.chunk(self.made(x), 2, 1)
        scale = torch.nn.functional.softplus(pre_scale)
        y = shift + scale * x
        if logdet is None:
            return y
        # 1e-8 keeps the log finite if softplus ever underflows.
        return y, logdet + torch.log(scale + 1e-8).sum(1)
# noinspection PyUnusedLocal
class NAFDSF(nn.Module):
    """Neural autoregressive flow with a deep sigmoidal (DSF) transformer.

    A MADE (or CMADE when ``dimc > 0``) conditioner emits ``3 * ndim``
    parameters per input dimension, which parameterise the elementwise
    ``sigmoid_flow`` transformer.
    """
    def __init__(self, dim, dimh=16, num_hidden_layers=2, natural_ordering=True, ndim=4, dimc=0,
                 activation=torch.nn.ReLU()):
        super(NAFDSF, self).__init__()
        self.dim = dim
        self.dimh = dimh
        self.dimc = dimc    # context dimensionality; 0 means unconditional
        self.ndim = ndim    # number of sigmoid components per input dimension
        self.num_hidden_layers = num_hidden_layers
        hidden_sizes = [dimh] * num_hidden_layers
        if dimc == 0:
            # Unconditional case: plain MADE emits dim*ndim*3 parameters.
            self.made = MADE(dim, hidden_sizes, dim*ndim*3, num_masks=1, natural_ordering=natural_ordering,
                             activation=activation)
            # Near-identity init: tiny weights, zero biases except the first
            # dim entries which get log(e - 1) (softplus inverse of 1).
            self.made.net[-1].weight.data.uniform_(-0.001, 0.001)
            self.made.net[-1].bias.data.zero_()
            self.made.net[-1].bias[:dim].data.zero_().add_(np.log(np.exp(1) - 1))
        else:
            # note: there's some flexibility in the design of how to condition on the context
            self.context_net = nn.Sequential(
                nn.Linear(dimc, dimh),
                activation
            )
            self.made = CMADE(dim, hidden_sizes, dim*ndim*3, dimc=dimh, num_masks=1, natural_ordering=natural_ordering,
                              activation=activation)
            # Same near-identity init, but through CMADE's wrapped last layer.
            self.made.layers[-1].layer.weight.data.uniform_(-0.001, 0.001)
            self.made.layers[-1].layer.bias.data.zero_()
            self.made.layers[-1].layer.bias[:dim].data.zero_().add_(np.log(np.exp(1) - 1))

    def forward(self, x):
        return self.forward_transform(x)

    def forward_transform(self, x, logdet=None, context=None, **kwargs):
        """Apply the DSF transformer; returns y (and updated logdet if given)."""
        if self.dimc == 0:
            # Rearrange conditioner output to (batch, dim, 3*ndim) for sigmoid_flow.
            params = self.made(x).view(-1, self.ndim*3, self.dim).permute(0, 2, 1)
        else:
            params = self.made(x, self.context_net(context)).view(-1, self.ndim * 3, self.dim).permute(0, 2, 1)
        y, dlogdet = sigmoid_flow(x, 0, self.ndim, params)
        if logdet is None:
            return y
        else:
            return y, logdet + dlogdet
| [
"torch.eye",
"torch.sqrt",
"lib.made.MADE",
"torch.mm",
"torch.slogdet",
"numpy.exp",
"torch.pixel_shuffle",
"torch.diag",
"torch.sign",
"torch.exp",
"torch.triu",
"torch.nn.Linear",
"torch.zeros",
"torch.log",
"torch.nn.Parameter",
"torch.zeros_like",
"torch.nn.ModuleList",
"torch... | [((7325, 7363), 'torch.pixel_shuffle', 'torch.pixel_shuffle', (['x', 'upscale_factor'], {}), '(x, upscale_factor)\n', (7344, 7363), False, 'import torch\n'), ((3902, 3928), 'torch.nn.ModuleList', 'torch.nn.ModuleList', (['flows'], {}), '(flows)\n', (3921, 3928), False, 'import torch\n'), ((5240, 5261), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (5254, 5261), False, 'import matplotlib\n'), ((5275, 5299), 'torch.linspace', 'torch.linspace', (['(-b)', 'b', 'n'], {}), '(-b, b, n)\n', (5289, 5299), False, 'import torch\n'), ((5313, 5337), 'torch.linspace', 'torch.linspace', (['(-b)', 'b', 'n'], {}), '(-b, b, n)\n', (5327, 5337), False, 'import torch\n'), ((5355, 5377), 'torch.meshgrid', 'torch.meshgrid', (['x1', 'x2'], {}), '(x1, x2)\n', (5369, 5377), False, 'import torch\n'), ((5473, 5498), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5496, 5498), False, 'import torch\n'), ((5665, 5680), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (5673, 5680), True, 'import matplotlib.pyplot as plt\n'), ((8216, 8240), 'torch.nn.functional.linear', 'F.linear', (['x', 'self.weight'], {}), '(x, self.weight)\n', (8224, 8240), True, 'import torch.nn.functional as F\n'), ((8874, 8908), 'torch.nn.Conv2d', 'nn.Conv2d', (['dim', 'dim'], {'kernel_size': '(1)'}), '(dim, dim, kernel_size=1)\n', (8883, 8908), True, 'import torch.nn as nn\n'), ((8986, 8997), 'torch.lu', 'torch.lu', (['W'], {}), '(W)\n', (8994, 8997), False, 'import torch\n'), ((9016, 9043), 'torch.lu_unpack', 'torch.lu_unpack', (['LU', 'pivots'], {}), '(LU, pivots)\n', (9031, 9043), False, 'import torch\n'), ((9057, 9071), 'torch.diag', 'torch.diag', (['LU'], {}), '(LU)\n', (9067, 9071), False, 'import torch\n'), ((10117, 10138), 'torch.sum', 'torch.sum', (['self.s_log'], {}), '(self.s_log)\n', (10126, 10138), False, 'import torch\n'), ((10381, 10485), 'lib.made.MADE', 'MADE', (['dim', '[]', '(dim * 2)'], {'num_masks': '(1)', 
'natural_ordering': 'natural_ordering', 'activation': 'torch.nn.Identity'}), '(dim, [], dim * 2, num_masks=1, natural_ordering=natural_ordering,\n activation=torch.nn.Identity)\n', (10385, 10485), False, 'from lib.made import MADE, CMADE\n'), ((10858, 10890), 'torch.nn.functional.softplus', 'torch.nn.functional.softplus', (['ls'], {}), '(ls)\n', (10886, 10890), False, 'import torch\n'), ((11179, 11194), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (11192, 11194), False, 'import torch\n'), ((11402, 11510), 'lib.made.MADE', 'MADE', (['dim', 'hidden_sizes', '(dim * 2)'], {'num_masks': '(1)', 'natural_ordering': 'natural_ordering', 'activation': 'activation'}), '(dim, hidden_sizes, dim * 2, num_masks=1, natural_ordering=\n natural_ordering, activation=activation)\n', (11406, 11510), False, 'from lib.made import MADE, CMADE\n'), ((11907, 11939), 'torch.nn.functional.softplus', 'torch.nn.functional.softplus', (['ls'], {}), '(ls)\n', (11935, 11939), False, 'import torch\n'), ((12264, 12279), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (12277, 12279), False, 'import torch\n'), ((13927, 13964), 'lib.naf.sigmoid_flow', 'sigmoid_flow', (['x', '(0)', 'self.ndim', 'params'], {}), '(x, 0, self.ndim, params)\n', (13939, 13964), False, 'from lib.naf import sigmoid_flow\n'), ((2412, 2427), 'torch.exp', 'torch.exp', (['logs'], {}), '(logs)\n', (2421, 2427), False, 'import torch\n'), ((8509, 8535), 'torch.slogdet', 'torch.slogdet', (['self.weight'], {}), '(self.weight)\n', (8522, 8535), False, 'import torch\n'), ((9159, 9179), 'torch.zeros_like', 'torch.zeros_like', (['LU'], {}), '(LU)\n', (9175, 9179), False, 'import torch\n'), ((9258, 9271), 'torch.sign', 'torch.sign', (['s'], {}), '(s)\n', (9268, 9271), False, 'import torch\n'), ((9398, 9414), 'torch.nn.Parameter', 'nn.Parameter', (['LU'], {}), '(LU)\n', (9410, 9414), True, 'import torch.nn as nn\n'), ((9465, 9488), 'torch.tril', 'torch.tril', (['self.LU', '(-1)'], {}), '(self.LU, -1)\n', (9475, 9488), False, 
'import torch\n'), ((9535, 9557), 'torch.triu', 'torch.triu', (['self.LU', '(1)'], {}), '(self.LU, 1)\n', (9545, 9557), False, 'import torch\n'), ((9644, 9658), 'torch.mm', 'torch.mm', (['L', 'U'], {}), '(L, U)\n', (9652, 9658), False, 'import torch\n'), ((12566, 12681), 'lib.made.MADE', 'MADE', (['dim', 'hidden_sizes', '(dim * ndim * 3)'], {'num_masks': '(1)', 'natural_ordering': 'natural_ordering', 'activation': 'activation'}), '(dim, hidden_sizes, dim * ndim * 3, num_masks=1, natural_ordering=\n natural_ordering, activation=activation)\n', (12570, 12681), False, 'from lib.made import MADE, CMADE\n'), ((13156, 13282), 'lib.made.CMADE', 'CMADE', (['dim', 'hidden_sizes', '(dim * ndim * 3)'], {'dimc': 'dimh', 'num_masks': '(1)', 'natural_ordering': 'natural_ordering', 'activation': 'activation'}), '(dim, hidden_sizes, dim * ndim * 3, dimc=dimh, num_masks=1,\n natural_ordering=natural_ordering, activation=activation)\n', (13161, 13282), False, 'from lib.made import MADE, CMADE\n'), ((649, 680), 'torch.zeros', 'torch.zeros', (['(1)', 'num_features', '(1)'], {}), '(1, num_features, 1)\n', (660, 680), False, 'import torch\n'), ((1906, 1921), 'torch.exp', 'torch.exp', (['logs'], {}), '(logs)\n', (1915, 1921), False, 'import torch\n'), ((4935, 4957), 'lib.distributions.log_standard_normal', 'log_standard_normal', (['z'], {}), '(z)\n', (4954, 4957), False, 'from lib.distributions import log_standard_normal\n'), ((8107, 8121), 'torch.eye', 'torch.eye', (['dim'], {}), '(dim)\n', (8116, 8121), False, 'import torch\n'), ((8122, 8141), 'torch.randperm', 'torch.randperm', (['dim'], {}), '(dim)\n', (8136, 8141), False, 'import torch\n'), ((9134, 9148), 'torch.eye', 'torch.eye', (['dim'], {}), '(dim)\n', (9143, 9148), False, 'import torch\n'), ((13068, 13089), 'torch.nn.Linear', 'nn.Linear', (['dimc', 'dimh'], {}), '(dimc, dimh)\n', (13077, 13089), True, 'import torch.nn as nn\n'), ((905, 936), 'torch.zeros', 'torch.zeros', (['(1)', 'num_features', '(1)'], {}), '(1, num_features, 
1)\n', (916, 936), False, 'import torch\n'), ((1406, 1431), 'torch.sum', 'torch.sum', (['x'], {'dim': '(0, -1)'}), '(x, dim=(0, -1))\n', (1415, 1431), False, 'import torch\n'), ((2005, 2021), 'torch.log', 'torch.log', (['scale'], {}), '(scale)\n', (2014, 2021), False, 'import torch\n'), ((9491, 9510), 'torch.eye', 'torch.eye', (['self.dim'], {}), '(self.dim)\n', (9500, 9510), False, 'import torch\n'), ((9575, 9596), 'torch.exp', 'torch.exp', (['self.s_log'], {}), '(self.s_log)\n', (9584, 9596), False, 'import torch\n'), ((10654, 10663), 'numpy.exp', 'np.exp', (['(1)'], {}), '(1)\n', (10660, 10663), True, 'import numpy as np\n'), ((11703, 11712), 'numpy.exp', 'np.exp', (['(1)'], {}), '(1)\n', (11709, 11712), True, 'import numpy as np\n'), ((9337, 9349), 'torch.abs', 'torch.abs', (['s'], {}), '(s)\n', (9346, 9349), False, 'import torch\n'), ((12882, 12891), 'numpy.exp', 'np.exp', (['(1)'], {}), '(1)\n', (12888, 12891), True, 'import numpy as np\n'), ((13512, 13521), 'numpy.exp', 'np.exp', (['(1)'], {}), '(1)\n', (13518, 13521), True, 'import numpy as np\n'), ((11006, 11026), 'torch.log', 'torch.log', (['(s + 1e-08)'], {}), '(s + 1e-08)\n', (11015, 11026), False, 'import torch\n'), ((12055, 12075), 'torch.log', 'torch.log', (['(s + 1e-08)'], {}), '(s + 1e-08)\n', (12064, 12075), False, 'import torch\n'), ((1665, 1680), 'torch.sqrt', 'torch.sqrt', (['var'], {}), '(var)\n', (1675, 1680), False, 'import torch\n')] |
import numpy as np
if __name__ == '__main__':
    # Experiment grid: print one shell command per (parameter, algorithm) pair.
    T = 4000
    d = 1000
    s = 10
    K = 2
    delta_vals = np.logspace(-3, 1, 10)
    eps_vals = np.logspace(-3, 1, 10)
    iters = 20
    print("cd ../")
    cmd = "python3 -W ignore LimeCB.py --T %d --d %d --s %d --K %d --iters %d --param %0.3f --alg %s --noise 1.0"
    for i in range(len(delta_vals)):
        # limecb/oracle runs sweep eps; the plain linucb run sweeps delta.
        for base_alg in ("linucb", "minimonster"):
            for alg in ("limecb", "oracle"):
                print((cmd + " --base " + base_alg) % (T, d, s, K, iters, eps_vals[i], alg))
        print(cmd % (T, d, s, K, iters, delta_vals[i], 'linucb'))
| [
"numpy.logspace"
] | [((110, 132), 'numpy.logspace', 'np.logspace', (['(-3)', '(1)', '(10)'], {}), '(-3, 1, 10)\n', (121, 132), True, 'import numpy as np\n'), ((146, 168), 'numpy.logspace', 'np.logspace', (['(-3)', '(1)', '(10)'], {}), '(-3, 1, 10)\n', (157, 168), True, 'import numpy as np\n')] |
# This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
# example which maximizes the sum of a list of integers
# each of which can be 0 or 1
import random
import time
from deap import base
from deap import creator
from deap import tools
# Minimise a single objective (the schedule makespan).
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", list, typecode='i', fitness=creator.FitnessMin)

toolbox = base.Toolbox()

from MPDA_decode.instance import Instance
from MPDA_decode.MPDA_decode_discrete import MPDA_Decode_Discrete_NB,MPDA_Decode_Discrete_Base,MPDA_Decode_Discrete_RC

# Benchmark instance to solve; earlier candidates kept commented for reference.
insName = '14_14_ECCENTRIC_RANDOMCLUSTERED_SVLCV_LVLCV_thre0.1MPDAins.dat'
# insName = '11_8_RANDOMCLUSTERED_CENTRAL_SVSCV_LVSCV_thre0.1MPDAins.dat'
# insName = '20_20_CLUSTERED_RANDOM_QUADRANT_LVSCV_thre0.1MPDAins.dat'
insName = '29_29_CLUSTERED_ECCENTRIC_LVLCV_SVSCV_thre0.1MPDAins.dat'
ins = Instance('.\\benchmark\\' + insName)
IND_ROBNUM = ins.robNum    # robots == number of permutation segments per individual
IND_TASKNUM = ins.taskNum  # tasks == length of each permutation segment
# Share the loaded instance with every decoder via its class attribute.
MPDA_Decode_Discrete_Base._ins = ins
MPDA_Decode_Discrete_NB._ins = ins
MPDA_Decode_Discrete_RC._ins = ins
print(ins)
def mpda_init_encode(robNum, taskNum):
    """Build a flat chromosome: one random task permutation per robot.

    The result has length ``robNum * taskNum``; the slice
    ``[r*taskNum:(r+1)*taskNum]`` is robot ``r``'s task visiting order.
    """
    chromosome = []
    for _ in range(robNum):
        perm = list(range(taskNum))
        random.shuffle(perm)
        chromosome += perm
    return chromosome
import numpy as np
def mpda_eval_discrete_nb(individual):
    """DEAP fitness: makespan of the individual under the NB discrete decoder.

    Returns a one-tuple, as DEAP evaluation functions must.
    """
    # Rows are robots, columns their task order; the row-major reshape matches
    # the flat chromosome layout produced by mpda_init_encode.
    encode = np.asarray(individual, dtype=int).reshape(IND_ROBNUM, IND_TASKNUM)
    decoder = MPDA_Decode_Discrete_NB()
    makespan = decoder.decode(encode)
    return makespan,
def mpda_eval_discrete_rc(individual):
    """DEAP fitness: makespan of the individual under the RC discrete decoder.

    Returns a one-tuple, as DEAP evaluation functions must.
    """
    # Rows are robots, columns their task order; the row-major reshape matches
    # the flat chromosome layout produced by mpda_init_encode.
    encode = np.asarray(individual, dtype=int).reshape(IND_ROBNUM, IND_TASKNUM)
    decoder = MPDA_Decode_Discrete_RC()
    makespan = decoder.decode(encode)
    return makespan,
def mpda_mate(ind1, ind2):
    """Crossover: apply PMX independently to each robot's permutation segment.

    Keeps every segment a valid task permutation; both individuals are
    modified in place and returned.
    """
    for start in range(0, len(ind1), IND_TASKNUM):
        stop = start + IND_TASKNUM
        seg1 = ind1[start:stop]
        seg2 = ind2[start:stop]
        # PMX mutates the segment copies in place; write them back afterwards.
        mpda_cxPartialyMatched(seg1, seg2)
        ind1[start:stop] = seg1
        ind2[start:stop] = seg2
    return ind1, ind2
def mpda_cxPartialyMatched(ind1, ind2):
    """Executes a partially matched crossover (PMX) on the input individuals.
    The two individuals are modified in place. This crossover expects
    :term:`sequence` individuals of indices, the result for any other type of
    individuals is unpredictable.
    :param ind1: The first individual participating in the crossover.
    :param ind2: The second individual participating in the crossover.
    :returns: A tuple of two individuals.
    Moreover, this crossover generates two children by matching
    pairs of values in a certain range of the two parents and swapping the values
    of those indexes. For more details see [Goldberg1985]_.
    This function uses the :func:`~random.randint` function from the python base
    :mod:`random` module.
    .. [Goldberg1985] Goldberg and Lingel, "Alleles, loci, and the traveling
       salesman problem", 1985.
    """
    size = min(len(ind1), len(ind2))
    p1, p2 = [0] * size, [0] * size

    # Initialize the position of each indices in the individuals:
    # p1[v] / p2[v] hold the index at which value v sits in ind1 / ind2.
    for i in range(size):
        p1[ind1[i]] = i
        p2[ind2[i]] = i
    # Choose crossover points (swapped below if drawn out of order).
    cxpoint1 = random.randint(0, size)
    cxpoint2 = random.randint(0, size - 1)
    if cxpoint2 >= cxpoint1:
        cxpoint2 += 1
    else:  # Swap the two cx points
        cxpoint1, cxpoint2 = cxpoint2, cxpoint1

    # Apply crossover between cx points
    for i in range(cxpoint1, cxpoint2):
        # Keep track of the selected values
        temp1 = ind1[i]
        temp2 = ind2[i]
        # Swap the matched value so each child stays a valid permutation.
        ind1[i], ind1[p1[temp2]] = temp2, temp1
        ind2[i], ind2[p2[temp1]] = temp1, temp2
        # Position bookkeeping: keep p1/p2 consistent with the swaps above.
        p1[temp1], p1[temp2] = p1[temp2], p1[temp1]
        p2[temp1], p2[temp2] = p2[temp2], p2[temp1]

    return ind1, ind2
def mpda_mutate(individual, indpb):
    """Per-gene swap mutation restricted to each robot's own segment.

    Each gene mutates independently with probability ``indpb``; a mutating
    gene is swapped with a uniformly chosen *other* position inside the same
    robot's task permutation, so every segment stays a valid permutation.

    Returns a one-tuple, as DEAP mutation operators must.
    """
    # (the previous version computed an unused ``size = len(individual)``)
    for robID in range(IND_ROBNUM):
        for i in range(IND_TASKNUM):
            if random.random() < indpb:
                # Sample a partner index != i within the segment: draw from
                # the taskNum-1 other slots and shift past i.
                swap_indx = random.randint(0, IND_TASKNUM - 2)
                if swap_indx >= i:
                    swap_indx += 1
                individual[i + robID * IND_TASKNUM], individual[swap_indx + robID * IND_TASKNUM] = \
                    individual[swap_indx + robID * IND_TASKNUM], individual[i + robID * IND_TASKNUM]
    return individual,
# Gene initialiser: one random task permutation per robot, concatenated flat.
toolbox.register("mpda_attr",mpda_init_encode,IND_ROBNUM,IND_TASKNUM)
toolbox.register("individual", tools.initIterate, creator.Individual,
                 toolbox.mpda_attr)

# define the population to be a list of individuals
toolbox.register("population", tools.initRepeat, list, toolbox.individual)

# ----------
# Operator registration
# ----------
# register the goal / fitness function (makespan via the NB discrete decoder)
toolbox.register("evaluate",mpda_eval_discrete_nb)

# register the crossover operator (segment-wise PMX)
toolbox.register("mate",mpda_mate)

# register a mutation operator with a probability of 0.01 to
# swap each gene within its robot's segment
toolbox.register("mutate", mpda_mutate, indpb=0.01)
# alternative mutation kept for reference:
# tools.mutShuffleIndexes

# selection operator: automatic epsilon-lexicase selection
# over the current generation
toolbox.register("select", tools.selAutomaticEpsilonLexicase)
# ----------
# Per-generation statistics are appended to this per-instance log file.
f_data = open('.//debugData//GA_'+insName,'w')
def main():
    """Run the genetic algorithm: evolve a population of 300 individuals
    for 600 generations.

    Writes per-generation min/max fitness and the total runtime to the
    module-level ``f_data`` log file, and closes it when done.

    Fix: ``time.clock()`` was deprecated since Python 3.3 and removed in
    Python 3.8; replaced with ``time.perf_counter()``.
    """
    random.seed(64)
    # create an initial population of 300 individuals (where
    # each individual is a list of integers)
    start = time.perf_counter()
    pop = toolbox.population(n=300)
    # CXPB is the probability with which two individuals are crossed;
    # MUTPB is the probability for mutating an individual
    CXPB, MUTPB = 0.5, 0.2
    print("Start of evolution")
    # Evaluate the entire population
    fitnesses = list(map(toolbox.evaluate, pop))
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit
    print("  Evaluated %i individuals" % len(pop))
    # Extracting all the fitnesses of
    fits = [ind.fitness.values[0] for ind in pop]
    # Variable keeping track of the number of generations
    g = 0
    # Begin the evolution
    while g < 600:
        # A new generation
        g = g + 1
        print("-- Generation %i --" % g)
        # Select the next generation individuals
        offspring = toolbox.select(pop, len(pop))
        # Clone the selected individuals
        offspring = list(map(toolbox.clone, offspring))
        # Apply crossover and mutation on the offspring
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            # cross two individuals with probability CXPB
            if random.random() < CXPB:
                toolbox.mate(child1, child2)
                # fitness values of the children
                # must be recalculated later
                del child1.fitness.values
                del child2.fitness.values
        for mutant in offspring:
            # mutate an individual with probability MUTPB
            if random.random() < MUTPB:
                toolbox.mutate(mutant)
                del mutant.fitness.values
        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit
        print("  Evaluated %i individuals" % len(invalid_ind))
        # The population is entirely replaced by the offspring
        pop[:] = offspring
        # Gather all the fitnesses in one list and print the stats
        fits = [ind.fitness.values[0] for ind in pop]
        length = len(pop)
        mean = sum(fits) / length
        sum2 = sum(x * x for x in fits)
        std = abs(sum2 / length - mean ** 2) ** 0.5
        print("  Min %s" % min(fits))
        print("  Max %s" % max(fits))
        print("  Avg %s" % mean)
        print("  Std %s" % std)
        f_data.write(str(g) + ' ' + str(min(fits)) + ' ' + str(max(fits)) + '\n')
        f_data.flush()
    print("-- End of (successful) evolution --")
    end = time.perf_counter()
    f_data.write('time =' + str(end - start) + '\n')
    best_ind = tools.selBest(pop, 1)[0]
    print("Best individual is %s, %s" % (best_ind, best_ind.fitness.values))
    f_data.close()
# print(toolbox.select)
if __name__ == "__main__":
    # Scratch code from manually testing the encode/crossover helpers,
    # kept for reference:
    # random.seed(1)
    # a = mpda_init_encode(3,4)
    # b = mpda_init_encode(3,4)
    # print('a = ',a)
    # print('b = ',b)
    #
    #
    # x,y = mpda_mate(a,b)
    # print('a = ',a)
    # print('b = ',b)
    #
    # print('x = ',x)
    # print('y = ',y)
    # a = random.sample(range(10),10)
    # b = random.sample(range(10),10)
    # print(mpda_cxPartialyMatched(a,b))
    # Run the genetic algorithm.
    main()
# tools.initIterate()
# = [3,3,4]
# tools.initCycle(list,toolbox.indices,3) | [
"deap.base.Toolbox",
"MPDA_decode.MPDA_decode_discrete.MPDA_Decode_Discrete_NB",
"random.randint",
"random.shuffle",
"MPDA_decode.instance.Instance",
"numpy.zeros",
"time.clock",
"random.random",
"deap.creator.create",
"MPDA_decode.MPDA_decode_discrete.MPDA_Decode_Discrete_RC",
"random.seed",
... | [((885, 944), 'deap.creator.create', 'creator.create', (['"""FitnessMin"""', 'base.Fitness'], {'weights': '(-1.0,)'}), "('FitnessMin', base.Fitness, weights=(-1.0,))\n", (899, 944), False, 'from deap import creator\n'), ((945, 1021), 'deap.creator.create', 'creator.create', (['"""Individual"""', 'list'], {'typecode': '"""i"""', 'fitness': 'creator.FitnessMin'}), "('Individual', list, typecode='i', fitness=creator.FitnessMin)\n", (959, 1021), False, 'from deap import creator\n'), ((1033, 1047), 'deap.base.Toolbox', 'base.Toolbox', ([], {}), '()\n', (1045, 1047), False, 'from deap import base\n'), ((1505, 1541), 'MPDA_decode.instance.Instance', 'Instance', (["('.\\\\benchmark\\\\' + insName)"], {}), "('.\\\\benchmark\\\\' + insName)\n", (1513, 1541), False, 'from MPDA_decode.instance import Instance\n'), ((2002, 2048), 'numpy.zeros', 'np.zeros', (['(ins.robNum, ins.taskNum)'], {'dtype': 'int'}), '((ins.robNum, ins.taskNum), dtype=int)\n', (2010, 2048), True, 'import numpy as np\n'), ((2227, 2252), 'MPDA_decode.MPDA_decode_discrete.MPDA_Decode_Discrete_NB', 'MPDA_Decode_Discrete_NB', ([], {}), '()\n', (2250, 2252), False, 'from MPDA_decode.MPDA_decode_discrete import MPDA_Decode_Discrete_NB, MPDA_Decode_Discrete_Base, MPDA_Decode_Discrete_RC\n'), ((2382, 2428), 'numpy.zeros', 'np.zeros', (['(ins.robNum, ins.taskNum)'], {'dtype': 'int'}), '((ins.robNum, ins.taskNum), dtype=int)\n', (2390, 2428), True, 'import numpy as np\n'), ((2607, 2632), 'MPDA_decode.MPDA_decode_discrete.MPDA_Decode_Discrete_RC', 'MPDA_Decode_Discrete_RC', ([], {}), '()\n', (2630, 2632), False, 'from MPDA_decode.MPDA_decode_discrete import MPDA_Decode_Discrete_NB, MPDA_Decode_Discrete_Base, MPDA_Decode_Discrete_RC\n'), ((4366, 4389), 'random.randint', 'random.randint', (['(0)', 'size'], {}), '(0, size)\n', (4380, 4389), False, 'import random\n'), ((4405, 4432), 'random.randint', 'random.randint', (['(0)', '(size - 1)'], {}), '(0, size - 1)\n', (4419, 4432), False, 'import random\n'), ((6704, 
6719), 'random.seed', 'random.seed', (['(64)'], {}), '(64)\n', (6715, 6719), False, 'import random\n'), ((6839, 6851), 'time.clock', 'time.clock', ([], {}), '()\n', (6849, 6851), False, 'import time\n'), ((9469, 9481), 'time.clock', 'time.clock', ([], {}), '()\n', (9479, 9481), False, 'import time\n'), ((1854, 1877), 'random.shuffle', 'random.shuffle', (['permLst'], {}), '(permLst)\n', (1868, 1877), False, 'import random\n'), ((9546, 9567), 'deap.tools.selBest', 'tools.selBest', (['pop', '(1)'], {}), '(pop, 1)\n', (9559, 9567), False, 'from deap import tools\n'), ((5181, 5196), 'random.random', 'random.random', ([], {}), '()\n', (5194, 5196), False, 'import random\n'), ((5234, 5268), 'random.randint', 'random.randint', (['(0)', '(IND_TASKNUM - 2)'], {}), '(0, IND_TASKNUM - 2)\n', (5248, 5268), False, 'import random\n'), ((7996, 8011), 'random.random', 'random.random', ([], {}), '()\n', (8009, 8011), False, 'import random\n'), ((8352, 8367), 'random.random', 'random.random', ([], {}), '()\n', (8365, 8367), False, 'import random\n')] |
from utils.data_reader import prepare_data, prepare_data_loaders
from utils.utils import getMetrics
import torch.nn as nn
import torch
import numpy as np
from tqdm import tqdm
import os
import pandas as pd
import numpy as np
import os
import math
import random
import numpy as np
from utils import constant
# Paths to the prediction / ground-truth TSV files (configured in utils.constant).
pred_file_path = constant.pred_file_path
ground_file_path = constant.ground_file_path
# Bidirectional mapping between EmoContext emotion names and integer class ids.
emotion2label = {"others":0, "happy":1, "sad":2, "angry":3}
label2emotion = {0:"others", 1:"happy", 2: "sad", 3:"angry"}
def read_prediction(file_path):
    """Parse a tab-separated prediction file and return its label column.

    Each line must hold exactly five tab-separated fields; only rows whose
    final field is a known emotion name (a key of ``emotion2label``)
    contribute to the result, as the corresponding integer class id.
    """
    collected = []
    with open(file_path, "r") as handle:
        for raw in handle:
            _, _, _, _, label = raw.replace("\n", "").split("\t")
            if label in emotion2label:
                collected.append(np.array(emotion2label[label]))
    return np.array(collected)
# Read predicted class ids and expand them to a one-hot matrix over the
# four emotion classes.
pred = read_prediction(pred_file_path)
one_hot = np.zeros((pred.shape[0], 4))
one_hot[np.arange(pred.shape[0]), pred] = 1
pred = one_hot
# Ground truth is kept as integer class ids.
# NOTE(review): getMetrics receives one-hot predictions but integer
# ground-truth labels -- confirm this matches its expected signature.
ground = read_prediction(ground_file_path)
print(pred, ground)
print(pred.shape, ground.shape)
accuracy, microPrecision, microRecall, microF1 = getMetrics(pred, ground,True)
print(microF1)
"numpy.array",
"utils.utils.getMetrics",
"numpy.zeros",
"numpy.arange"
] | [((936, 964), 'numpy.zeros', 'np.zeros', (['(pred.shape[0], 4)'], {}), '((pred.shape[0], 4))\n', (944, 964), True, 'import numpy as np\n'), ((1171, 1201), 'utils.utils.getMetrics', 'getMetrics', (['pred', 'ground', '(True)'], {}), '(pred, ground, True)\n', (1181, 1201), False, 'from utils.utils import getMetrics\n'), ((870, 885), 'numpy.array', 'np.array', (['preds'], {}), '(preds)\n', (878, 885), True, 'import numpy as np\n'), ((973, 997), 'numpy.arange', 'np.arange', (['pred.shape[0]'], {}), '(pred.shape[0])\n', (982, 997), True, 'import numpy as np\n'), ((822, 852), 'numpy.array', 'np.array', (['emotion2label[label]'], {}), '(emotion2label[label])\n', (830, 852), True, 'import numpy as np\n')] |
#!/usr/bin/env/python3
"""Recipe for training a neural speech separation system on wsjmix the
dataset. The system employs an encoder, a decoder, and a masking network.
To run this recipe, do the following:
> python train.py hparams/sepformer.yaml
> python train.py hparams/dualpath_rnn.yaml
> python train.py hparams/convtasnet.yaml
The experiment file is flexible enough to support different neural
networks. By properly changing the parameter files, you can try
different architectures. The script supports both wsj2mix and
wsj3mix.
Authors
* <NAME> 2020
* <NAME> 2020
* <NAME> 2020
* <NAME> 2020
* <NAME> 2020
* <NAME> 2021
"""
# Libraries
import csv
import logging
import numpy as np
import os
import speechbrain as sb
import speechbrain.nnet.schedulers as schedulers
import sys
import torch
import torch.nn.functional as F
import torchaudio
# Partial imports
from hyperpyyaml import load_hyperpyyaml
from mir_eval.separation import bss_eval_sources
from torch.utils.data import DataLoader
# External files
from augment import FlipChannels, FlipSign, Remix, Shift
from datasets import MusdbDataset, Rawset
# from raw import Rawset
from tasnet import ConvTasNet
# Define training procedure
class Separation(sb.Brain):
    """SpeechBrain Brain for music source separation on MUSDB stems.

    Wraps ``hparams.convtasnet``: training sums randomly augmented stems
    into a mixture and optimizes ``hparams.loss``; at TEST time it computes
    per-stem SDR with mir_eval's ``bss_eval_sources`` and optionally dumps
    the separated audio to disk.
    """
    def compute_forward(self, targets, stage, inputs=None):
        """Run the separator.

        :param targets: stem tensor; during TRAIN the mixture is built here
            by augmenting the stems and summing them over dim 1.
        :param stage: ``sb.Stage`` value controlling augmentation.
        :param inputs: precomputed mixture, used at TEST time where
            ``targets`` may be None.
        :return: tuple (est_source, targets).
        """
        if stage == sb.Stage.TRAIN:
            targets = self.augment_data(targets)
            inputs = targets.sum(dim=1)
        # Forward pass
        est_source = self.hparams.convtasnet(inputs)
        # Normalization: scale each estimate by its per-channel peak.
        est_source = est_source / est_source.abs().max(dim=-1, keepdim=True)[0]
        # T changed after conv1d in encoder, fix it here
        T_origin = inputs.size(-1)
        T_est = est_source.size(-1)
        if T_origin > T_est:
            est_source = F.pad(est_source, (0, T_origin - T_est))
        else:
            est_source = est_source[:, :, :, :T_origin]
        # [B, T, Number of speaker=2]
        return est_source, targets
    def compute_objectives(self, predictions, targets):
        """Computes the sinr loss"""
        return self.hparams.loss(source=targets, estimate_source=predictions)
    def fit_batch(self, batch):
        """Trains one batch"""
        # Get inputs: index 0 along dim 1 is the premixed channel, the
        # remaining entries are the individual stems used as targets.
        inputs = batch[:, 1:, :, :].to(self.device)
        # Forward pass
        predictions, targets = self.compute_forward(inputs, sb.Stage.TRAIN)
        # Permute to fit expected shape in loss function
        predictions, targets = (
            predictions.permute(3, 0, 2, 1),
            targets.permute(3, 0, 2, 1),
        )
        predictions = predictions.reshape(
            predictions.size(0), -1, predictions.size(-1)
        )
        targets = targets.reshape(targets.size(0), -1, targets.size(-1))
        # Compute loss
        loss = self.compute_objectives(predictions, targets)
        loss = loss.mean()
        # Fix for computational problems: skip the update on exploding or
        # empty losses instead of corrupting the weights.
        if (loss < self.hparams.loss_upper_lim and loss.nelement() > 0):
            loss.backward()
            if self.hparams.clip_grad_norm >= 0:
                torch.nn.utils.clip_grad_norm_(
                    self.modules.parameters(), self.hparams.clip_grad_norm
                )
            self.optimizer.step()
        else:
            self.nonfinite_count += 1
            logger.info(
                "infinite loss or empty loss! it happened {} times so far - skipping this batch".format(
                    self.nonfinite_count
                )
            )
            loss.data = torch.tensor(0).to(self.device)
        self.optimizer.zero_grad()
        return loss.detach().cpu()
    def evaluate_batch(self, batch, stage):
        """Computations needed for validation/test batches"""
        if stage == sb.Stage.VALID:
            # NOTE(review): `mixture` is assigned but never used in this
            # branch; compute_forward rebuilds the mixture from `targets`.
            mixture = batch[:, 0, :, :].to(self.device)
            targets = batch[:, 1:, :, :].to(self.device)
            predictions, targets = self.compute_forward(targets, sb.Stage.TRAIN)
            predictions, targets = (
                predictions.permute(3, 0, 2, 1),
                targets.permute(3, 0, 2, 1),
            )
            predictions = predictions.reshape(
                predictions.size(0), -1, predictions.size(-1)
            )
            targets = targets.reshape(targets.size(0), -1, targets.size(-1))
            loss = self.compute_objectives(predictions, targets).mean()
        elif stage == sb.Stage.TEST:
            # Send to device
            mixture = batch[0].to(self.device)
            targets = batch[1].to(self.device)
            with torch.no_grad():
                ref = mixture.mean(dim=0)
                inp = mixture[:, :, :]
                inp = inp.to("cpu")
                # Get Prediction
                predictions, _ = self.compute_forward(
                    targets=None, inputs=inp, stage=sb.Stage.TEST
                )
            # Send to CPU
            predictions = predictions.to("cpu")
            mixture = mixture.to("cpu")
            targets = targets.to("cpu")
            ref = ref.to("cpu")
            # Normalize predictions back to the mixture's scale
            predictions = predictions * ref.std() + ref.mean()
            # Predicted Values
            vocals_hat = predictions[0, 0, :, :].numpy()
            drums_hat = predictions[0, 1, :, :].numpy()
            bass_hat = predictions[0, 2, :, :].numpy()
            accompaniment_hat = predictions[0, 3, :, :].numpy()
            # True Values
            vocals = targets[0, 0, :, :].t().numpy()
            drums = targets[0, 1, :, :].t().numpy()
            bass = targets[0, 2, :, :].t().numpy()
            accompaniment = targets[0, 3, :, :].t().numpy()
            # SDR per stem
            vocals_sdr = self.get_sdr(vocals, vocals_hat)
            drums_sdr = self.get_sdr(drums, drums_hat)
            bass_sdr = self.get_sdr(bass, bass_hat)
            accompaniment_sdr = self.get_sdr(accompaniment, accompaniment_hat)
            sdr = np.array([vocals_sdr, drums_sdr, bass_sdr, accompaniment_sdr]).mean()
            # Keep track of SDR values
            self.result_report["all_sdrs"].append(sdr)
            self.result_report["all_vocals_sdrs"].append(vocals_sdr)
            self.result_report["all_drums_sdrs"].append(drums_sdr)
            self.result_report["all_bass_sdrs"].append(bass_sdr)
            self.result_report["all_accompaniment_sdrs"].append(accompaniment_sdr)
            # Create audio folder if it doesn't already exists
            results_path = self.hparams.save_folder + "/audio_results"
            if not os.path.exists(results_path):
                os.makedirs(results_path)
            # Save only examples of the best results
            # NOTE(review): uses the module-level `separator` instead of
            # `self`; this only works because exactly one instance exists.
            if sdr > 4.0:
                self.save_audio(separator.testindex, results_path, mixture, predictions, targets)
            # Empty loss to satisfy return type of method
            loss = torch.tensor([0])
            # Increment count
            separator.testindex += 1
        return loss.detach()
    def augment_data(self, inputs):
        """Apply random sign/channel flips, time shift and remixing to the
        stems before they are summed into a training mixture."""
        augment = torch.nn.Sequential(
            FlipSign(),
            FlipChannels(),
            Shift(self.hparams.sample_rate),
            Remix(group_size=1)
        ).to(self.hparams.device)
        return augment(inputs)
    def get_sdr(self, source, prediction):
        """Return the mean SDR of `prediction` against `source` via
        mir_eval, guarding against all-zero reference sources."""
        source = protect_non_zeros(source)
        sdr, _, _, _ = bss_eval_sources(source, prediction)
        return sdr.mean()
    def save_audio(self, i, results_path, mixture, predictions, targets):
        """Write the mixture, predicted stems and reference stems of song
        number *i* as wav files under `results_path`.

        NOTE(review): the stem names used here (drums at prediction index
        0, vocals at index 3) disagree with the ordering assumed in
        evaluate_batch (vocals at index 0) -- confirm which labelling is
        correct before relying on the file names.
        """
        # Predictions
        torchaudio.save(
            filepath=results_path + "/song_{}_mix.wav".format(i),
            src=mixture[0, :, :],
            sample_rate=self.hparams.sample_rate
        )
        torchaudio.save(
            filepath=results_path + "/song_{}_drums_hat.wav".format(i),
            src=predictions[0, 0, :, :],
            sample_rate=self.hparams.sample_rate
        )
        torchaudio.save(
            filepath=results_path + "/song_{}_bass_hat.wav".format(i),
            src=predictions[0, 1, :, :],
            sample_rate=self.hparams.sample_rate
        )
        torchaudio.save(
            filepath=results_path + "/song_{}_accompaniment_hat.wav".format(i),
            src=predictions[0, 2, :, :],
            sample_rate=self.hparams.sample_rate
        )
        torchaudio.save(
            filepath=results_path + "/song_{}_vocals_hat.wav".format(i),
            src=predictions[0, 3, :, :],
            sample_rate=self.hparams.sample_rate
        )
        # Targets
        torchaudio.save(
            filepath=results_path + "/song_{}_drums.wav".format(i),
            src=targets[0, 0, :, :].t(),
            sample_rate=self.hparams.sample_rate
        )
        torchaudio.save(
            filepath=results_path + "/song_{}_bass.wav".format(i),
            src=targets[0, 1, :, :].t(),
            sample_rate=self.hparams.sample_rate
        )
        torchaudio.save(
            filepath=results_path + "/song_{}_accompaniment.wav".format(i),
            src=targets[0, 2, :, :].t(),
            sample_rate=self.hparams.sample_rate
        )
        torchaudio.save(
            filepath=results_path + "/song_{}_vocals.wav".format(i),
            src=targets[0, 3, :, :].t(),
            sample_rate=self.hparams.sample_rate
        )
    def save_results(self):
        """Dump per-song and average SDR figures to test_results.csv in
        the experiment's output folder."""
        print(self.result_report)
        print("Saving Results...")
        # Create folders where to store audio
        save_file = os.path.join(self.hparams.output_folder, "test_results.csv")
        # CSV columns
        csv_columns = [
            "ID",
            "Vocals SDR",
            "Drums SDR",
            "Bass SDR",
            "Accompaniment SDR",
            "SDR"
        ]
        # Create CSV file
        with open(save_file, "w") as results_csv:
            writer = csv.DictWriter(results_csv, fieldnames=csv_columns)
            writer.writeheader()
            # Loop all instances
            for i in range(len(self.result_report["all_sdrs"])):
                row = {
                    "ID": i,
                    "Vocals SDR": self.result_report["all_vocals_sdrs"][i],
                    "Drums SDR": self.result_report["all_drums_sdrs"][i],
                    "Bass SDR": self.result_report["all_bass_sdrs"][i],
                    "Accompaniment SDR": self.result_report["all_accompaniment_sdrs"][i],
                    "SDR": self.result_report["all_sdrs"][i],
                }
                writer.writerow(row)
            # Average row appended last
            row = {
                "ID": "Average",
                "Vocals SDR": np.mean(self.result_report["all_vocals_sdrs"]),
                "Drums SDR": np.mean(self.result_report["all_drums_sdrs"]),
                "Bass SDR": np.mean(self.result_report["all_bass_sdrs"]),
                "Accompaniment SDR": np.mean(self.result_report["all_accompaniment_sdrs"]),
                "SDR": np.mean(self.result_report["all_sdrs"]),
            }
            writer.writerow(row)
    def on_stage_end(self, stage, stage_loss, epoch):
        """Gets called at the end of a epoch."""
        # Compute/store important stats
        stage_stats = {"si-snr": stage_loss}
        if stage == sb.Stage.TRAIN:
            self.train_stats = stage_stats
        # Perform end-of-iteration things, like annealing, logging, etc.
        if stage == sb.Stage.VALID:
            # Learning rate annealing
            if isinstance(
                self.hparams.lr_scheduler, schedulers.ReduceLROnPlateau
            ):
                current_lr, next_lr = self.hparams.lr_scheduler(
                    [self.optimizer], epoch, stage_loss
                )
                schedulers.update_learning_rate(self.optimizer, next_lr)
            else:
                # if we do not use the reducelronplateau, we do not change the lr
                current_lr = self.hparams.optimizer.optim.param_groups[0]["lr"]
            self.hparams.train_logger.log_stats(
                stats_meta={"epoch": epoch, "lr": current_lr},
                train_stats=self.train_stats,
                valid_stats=stage_stats,
            )
            self.checkpointer.save_and_keep_only(
                meta={"si-snr": stage_stats["si-snr"]}, min_keys=["si-snr"],
            )
        elif stage == sb.Stage.TEST:
            self.hparams.train_logger.log_stats(
                stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
                test_stats=stage_stats,
            )
    def reset_layer_recursively(self, layer):
        """Reinitializes the parameters of the neural networks"""
        if hasattr(layer, "reset_parameters"):
            layer.reset_parameters()
        for child_layer in layer.modules():
            if layer != child_layer:
                self.reset_layer_recursively(child_layer)
def protect_non_zeros(source):
    """Guard bss_eval against all-silent reference channels.

    mir_eval's ``bss_eval_sources`` cannot handle a reference source that
    is entirely zero, so any silent channel gets a tiny non-zero sample
    injected at position 0.  The input is modified in place and returned.

    Fix: the previous check ``np.sum(source[d]) == 0`` also fired on
    non-silent signals whose samples cancel out (e.g. [1, -1]); testing
    with ``np.any`` only modifies genuinely all-zero channels.
    """
    for channel in source:
        if not np.any(channel):
            channel[0] = 0.001
    return source
if __name__ == "__main__":
    # Load hyperparameters file with command-line overrides
    hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
    with open(hparams_file) as fin:
        hparams = load_hyperpyyaml(fin, overrides)
    # Initialize ddp (useful only for multi-GPU DDP training)
    sb.utils.distributed.ddp_init_group(run_opts)
    # Logger info
    logger = logging.getLogger(__name__)
    # Create experiment directory
    sb.create_experiment_directory(
        experiment_directory=hparams["output_folder"],
        hyperparams_to_save=hparams_file,
        overrides=overrides,
    )
    # Test dataset & loader
    test_set = MusdbDataset(hparams)
    test_loader = DataLoader(test_set, batch_size=hparams["batch"], shuffle=False)
    # Create training dataset & loaders if not in test only mode.
    # Rawset windows are 5 seconds long and carry all 5 streams
    # (mixture + 4 stems).
    if not hparams["test_only"]:
        train_set = Rawset(
            os.path.join(hparams["musdb_raw_path"], "train"),
            samples=hparams["sample_rate"] * 5,
            channels=2,
            streams=[0, 1, 2, 3, 4],
            stride=hparams["sample_rate"],
        )
        train_loader = DataLoader(
            train_set, batch_size=hparams["N_batch"], shuffle=True
        )
        valid_set = Rawset(
            os.path.join(hparams["musdb_raw_path"], "valid"),
            samples=hparams["sample_rate"] * 5,
            channels=2,
            streams=[0, 1, 2, 3, 4],
            stride=hparams["sample_rate"],
        )
        valid_loader = DataLoader(
            valid_set, batch_size=hparams["N_batch"], shuffle=False
        )
    # Brain class initialization
    separator = Separation(
        modules=hparams["modules"],
        opt_class=hparams["optimizer"],
        hparams=hparams,
        run_opts=run_opts,
        checkpointer=hparams["checkpointer"],
    )
    # re-initialize the parameters
    for module in separator.modules.values():
        separator.reset_layer_recursively(module)
    # Start training if not in test only mode
    if not hparams["test_only"]:
        # Training
        separator.fit(
            separator.hparams.epoch_counter,
            train_loader, valid_loader
        )
    # Model Evaluation: move to CPU; evaluate_batch reads testindex and
    # result_report, which are attached here.
    separator.modules = separator.modules.to('cpu')
    separator.modules.eval()
    separator.testindex = 0
    separator.result_report = {
        "all_sdrs": [],
        "all_vocals_sdrs": [],
        "all_drums_sdrs": [],
        "all_bass_sdrs": [],
        "all_accompaniment_sdrs": []
    }
    # Evaluate Model
    separator.evaluate(test_loader, min_key="si-snr")
    # Save Results
    separator.save_results()
| [
"augment.FlipSign",
"numpy.sum",
"speechbrain.nnet.schedulers.update_learning_rate",
"speechbrain.create_experiment_directory",
"logging.getLogger",
"numpy.mean",
"datasets.MusdbDataset",
"torch.no_grad",
"os.path.join",
"csv.DictWriter",
"torch.nn.functional.pad",
"speechbrain.utils.distribut... | [((13460, 13492), 'speechbrain.parse_arguments', 'sb.parse_arguments', (['sys.argv[1:]'], {}), '(sys.argv[1:])\n', (13478, 13492), True, 'import speechbrain as sb\n'), ((13647, 13692), 'speechbrain.utils.distributed.ddp_init_group', 'sb.utils.distributed.ddp_init_group', (['run_opts'], {}), '(run_opts)\n', (13682, 13692), True, 'import speechbrain as sb\n'), ((13725, 13752), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (13742, 13752), False, 'import logging\n'), ((13792, 13929), 'speechbrain.create_experiment_directory', 'sb.create_experiment_directory', ([], {'experiment_directory': "hparams['output_folder']", 'hyperparams_to_save': 'hparams_file', 'overrides': 'overrides'}), "(experiment_directory=hparams['output_folder'\n ], hyperparams_to_save=hparams_file, overrides=overrides)\n", (13822, 13929), True, 'import speechbrain as sb\n'), ((14000, 14021), 'datasets.MusdbDataset', 'MusdbDataset', (['hparams'], {}), '(hparams)\n', (14012, 14021), False, 'from datasets import MusdbDataset, Rawset\n'), ((14040, 14104), 'torch.utils.data.DataLoader', 'DataLoader', (['test_set'], {'batch_size': "hparams['batch']", 'shuffle': '(False)'}), "(test_set, batch_size=hparams['batch'], shuffle=False)\n", (14050, 14104), False, 'from torch.utils.data import DataLoader\n'), ((7668, 7704), 'mir_eval.separation.bss_eval_sources', 'bss_eval_sources', (['source', 'prediction'], {}), '(source, prediction)\n', (7684, 7704), False, 'from mir_eval.separation import bss_eval_sources\n'), ((9770, 9830), 'os.path.join', 'os.path.join', (['self.hparams.output_folder', '"""test_results.csv"""'], {}), "(self.hparams.output_folder, 'test_results.csv')\n", (9782, 9830), False, 'import os\n'), ((13547, 13579), 'hyperpyyaml.load_hyperpyyaml', 'load_hyperpyyaml', (['fin', 'overrides'], {}), '(fin, overrides)\n', (13563, 13579), False, 'from hyperpyyaml import load_hyperpyyaml\n'), ((14480, 14546), 'torch.utils.data.DataLoader', 
'DataLoader', (['train_set'], {'batch_size': "hparams['N_batch']", 'shuffle': '(True)'}), "(train_set, batch_size=hparams['N_batch'], shuffle=True)\n", (14490, 14546), False, 'from torch.utils.data import DataLoader\n'), ((14846, 14913), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_set'], {'batch_size': "hparams['N_batch']", 'shuffle': '(False)'}), "(valid_set, batch_size=hparams['N_batch'], shuffle=False)\n", (14856, 14913), False, 'from torch.utils.data import DataLoader\n'), ((1943, 1983), 'torch.nn.functional.pad', 'F.pad', (['est_source', '(0, T_origin - T_est)'], {}), '(est_source, (0, T_origin - T_est))\n', (1948, 1983), True, 'import torch.nn.functional as F\n'), ((10128, 10179), 'csv.DictWriter', 'csv.DictWriter', (['results_csv'], {'fieldnames': 'csv_columns'}), '(results_csv, fieldnames=csv_columns)\n', (10142, 10179), False, 'import csv\n'), ((14244, 14292), 'os.path.join', 'os.path.join', (["hparams['musdb_raw_path']", '"""train"""'], {}), "(hparams['musdb_raw_path'], 'train')\n", (14256, 14292), False, 'import os\n'), ((14610, 14658), 'os.path.join', 'os.path.join', (["hparams['musdb_raw_path']", '"""valid"""'], {}), "(hparams['musdb_raw_path'], 'valid')\n", (14622, 14658), False, 'import os\n'), ((10900, 10946), 'numpy.mean', 'np.mean', (["self.result_report['all_vocals_sdrs']"], {}), "(self.result_report['all_vocals_sdrs'])\n", (10907, 10946), True, 'import numpy as np\n'), ((10977, 11022), 'numpy.mean', 'np.mean', (["self.result_report['all_drums_sdrs']"], {}), "(self.result_report['all_drums_sdrs'])\n", (10984, 11022), True, 'import numpy as np\n'), ((11052, 11096), 'numpy.mean', 'np.mean', (["self.result_report['all_bass_sdrs']"], {}), "(self.result_report['all_bass_sdrs'])\n", (11059, 11096), True, 'import numpy as np\n'), ((11135, 11188), 'numpy.mean', 'np.mean', (["self.result_report['all_accompaniment_sdrs']"], {}), "(self.result_report['all_accompaniment_sdrs'])\n", (11142, 11188), True, 'import numpy as np\n'), ((11213, 11252), 
'numpy.mean', 'np.mean', (["self.result_report['all_sdrs']"], {}), "(self.result_report['all_sdrs'])\n", (11220, 11252), True, 'import numpy as np\n'), ((11987, 12043), 'speechbrain.nnet.schedulers.update_learning_rate', 'schedulers.update_learning_rate', (['self.optimizer', 'next_lr'], {}), '(self.optimizer, next_lr)\n', (12018, 12043), True, 'import speechbrain.nnet.schedulers as schedulers\n'), ((13248, 13265), 'numpy.sum', 'np.sum', (['source[d]'], {}), '(source[d])\n', (13254, 13265), True, 'import numpy as np\n'), ((3664, 3679), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (3676, 3679), False, 'import torch\n'), ((4712, 4727), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4725, 4727), False, 'import torch\n'), ((7164, 7181), 'torch.tensor', 'torch.tensor', (['[0]'], {}), '([0])\n', (7176, 7181), False, 'import torch\n'), ((7376, 7386), 'augment.FlipSign', 'FlipSign', ([], {}), '()\n', (7384, 7386), False, 'from augment import FlipChannels, FlipSign, Remix, Shift\n'), ((7400, 7414), 'augment.FlipChannels', 'FlipChannels', ([], {}), '()\n', (7412, 7414), False, 'from augment import FlipChannels, FlipSign, Remix, Shift\n'), ((7428, 7459), 'augment.Shift', 'Shift', (['self.hparams.sample_rate'], {}), '(self.hparams.sample_rate)\n', (7433, 7459), False, 'from augment import FlipChannels, FlipSign, Remix, Shift\n'), ((7473, 7492), 'augment.Remix', 'Remix', ([], {'group_size': '(1)'}), '(group_size=1)\n', (7478, 7492), False, 'from augment import FlipChannels, FlipSign, Remix, Shift\n'), ((6812, 6840), 'os.path.exists', 'os.path.exists', (['results_path'], {}), '(results_path)\n', (6826, 6840), False, 'import os\n'), ((6862, 6887), 'os.makedirs', 'os.makedirs', (['results_path'], {}), '(results_path)\n', (6873, 6887), False, 'import os\n'), ((6173, 6235), 'numpy.array', 'np.array', (['[vocals_sdr, drums_sdr, bass_sdr, accompaniment_sdr]'], {}), '([vocals_sdr, drums_sdr, bass_sdr, accompaniment_sdr])\n', (6181, 6235), True, 'import numpy as np\n')] |
## This is originally from: http://nghiaho.com/?page_id=671
import numpy as np
# Input: expects Nx3 matrix of points
# Returns R,t
# R = 3x3 rotation matrix
# t = 3x1 column vector
def rigid_transform_3D(A, B):
    """Recover the rigid transform mapping point set A onto point set B.

    Uses the SVD-based Kabsch method: both point sets are centered on
    their centroids and the optimal rotation is extracted from the SVD of
    the cross-covariance matrix.

    Input: A, B -- Nx3 matrices of corresponding points.
    Returns (R, t, inv_R) where
      R     -- 3x3 rotation such that B ~= A @ R.T + t
      t     -- 3-vector translation
      inv_R -- 3x3 rotation of the inverse (B -> A) mapping

    Fix: removed the unused local ``N``.
    """
    assert len(A) == len(B)
    centroid_A = np.mean(A, axis=0)
    centroid_B = np.mean(B, axis=0)
    # Center the points: AA and BB differ only by a rotation.
    AA = A - centroid_A
    BB = B - centroid_B
    # dot is matrix multiplication for array
    H = np.dot(AA.T, BB)
    U, S, Vt = np.linalg.svd(H)
    R = np.dot(Vt.T, U.T)
    inv_H = np.dot(BB.T, AA)
    invU, invS, invVt = np.linalg.svd(inv_H)
    inv_R = np.dot(invVt.T, invU.T)
    # Special reflection case: det == -1 means the SVD produced an
    # improper rotation; flip the last singular direction to repair it.
    if np.linalg.det(R) < 0:
        print("Reflection detected")
        Vt[2,:] *= -1
        R = np.dot(Vt.T, U.T)
    if np.linalg.det(inv_R) < 0:
        print("Reflection detected")
        invVt[2,:] *= -1
        inv_R = np.dot(invVt.T, invU.T)
    t = centroid_B.T - np.dot(R, centroid_A.T)
    return R, t, inv_R
| [
"numpy.linalg.svd",
"numpy.dot",
"numpy.mean",
"numpy.linalg.det"
] | [((299, 317), 'numpy.mean', 'np.mean', (['A'], {'axis': '(0)'}), '(A, axis=0)\n', (306, 317), True, 'import numpy as np\n'), ((335, 353), 'numpy.mean', 'np.mean', (['B'], {'axis': '(0)'}), '(B, axis=0)\n', (342, 353), True, 'import numpy as np\n'), ((677, 693), 'numpy.dot', 'np.dot', (['AA.T', 'BB'], {}), '(AA.T, BB)\n', (683, 693), True, 'import numpy as np\n'), ((709, 725), 'numpy.linalg.svd', 'np.linalg.svd', (['H'], {}), '(H)\n', (722, 725), True, 'import numpy as np\n'), ((734, 751), 'numpy.dot', 'np.dot', (['Vt.T', 'U.T'], {}), '(Vt.T, U.T)\n', (740, 751), True, 'import numpy as np\n'), ((765, 781), 'numpy.dot', 'np.dot', (['BB.T', 'AA'], {}), '(BB.T, AA)\n', (771, 781), True, 'import numpy as np\n'), ((806, 826), 'numpy.linalg.svd', 'np.linalg.svd', (['inv_H'], {}), '(inv_H)\n', (819, 826), True, 'import numpy as np\n'), ((839, 862), 'numpy.dot', 'np.dot', (['invVt.T', 'invU.T'], {}), '(invVt.T, invU.T)\n', (845, 862), True, 'import numpy as np\n'), ((901, 917), 'numpy.linalg.det', 'np.linalg.det', (['R'], {}), '(R)\n', (914, 917), True, 'import numpy as np\n'), ((994, 1011), 'numpy.dot', 'np.dot', (['Vt.T', 'U.T'], {}), '(Vt.T, U.T)\n', (1000, 1011), True, 'import numpy as np\n'), ((1020, 1040), 'numpy.linalg.det', 'np.linalg.det', (['inv_R'], {}), '(inv_R)\n', (1033, 1040), True, 'import numpy as np\n'), ((1124, 1147), 'numpy.dot', 'np.dot', (['invVt.T', 'invU.T'], {}), '(invVt.T, invU.T)\n', (1130, 1147), True, 'import numpy as np\n'), ((1172, 1195), 'numpy.dot', 'np.dot', (['R', 'centroid_A.T'], {}), '(R, centroid_A.T)\n', (1178, 1195), True, 'import numpy as np\n')] |
"""
Collection of utility functions for wrapping-textures.
Written by <NAME>
"""
from __future__ import print_function
import sys
import time
import itertools
import logging
import numpy
from recordclass import recordclass
######################################
# Record classes for neccessary data #
######################################
# Simple mutable record types (recordclass behaves like a mutable
# namedtuple: fields support both attribute and index access).
UV = recordclass('UV', ['u', 'v'])        # GPU texture coordinate
Pixel = recordclass('Pixel', ['x', 'y'])  # integer pixel coordinate
XY = recordclass('XY', ['x', 'y'])        # continuous texture-space point
XYZ = recordclass('XYZ', ['x', 'y', 'z'])
# Quadratic energy: x.T @ Q @ x + 2 * x.T @ L + C = 0
QuadEnergy = recordclass('QuadraticEnergy', ['Q', 'L', 'C'])
def pairwise(iterable):
    """Yield overlapping consecutive pairs: s -> (s0,s1), (s1,s2), ..."""
    leading, trailing = itertools.tee(iterable)
    # Advance the second copy by one so the two streams are offset.
    next(trailing, None)
    return zip(leading, trailing)
def pairwise_loop(iterable):
    """
    Create pair wise tuple of the indexable sequence given, with the last
    element wrapping back to the first.
    Returns: s -> (s0,s1), (s1,s2), (s2, s3), ..., (sN, s0)
    """
    # Zip the sequence against a copy rotated left by one element.
    rotated = tuple(iterable[1:]) + (iterable[0],)
    return tuple(zip(iterable, rotated))
def rowcol_to_index(row, col, width):
    """Flatten row-major (row, col) coordinates into a 1-D index."""
    return row * width + col
def lerp(t, x0, x1):
    """Return the point a fraction t of the way from x0 to x1."""
    span = x1 - x0
    return x0 + t * span
def lerpPair(t, p0, p1):
    """Interpolate the first two components of p0 and p1 independently."""
    head = lerp(t, p0[0], p1[0])
    tail = lerp(t, p0[1], p1[1])
    return [head, tail]
def lerp_UV(t, uv0, uv1):
    """
    Linearly interpolate between (u0,v0) and (u1,v1).
    Returns a UV object.
    """
    u, v = lerpPair(t, uv0, uv1)
    return UV(u, v)
def lerp_XY(t, xy0, xy1):
    """
    Linearly interpolate between (x0,y0) and (x1,y1).
    Returns a XY object.
    """
    x, y = lerpPair(t, xy0, xy1)
    return XY(x, y)
def UV_to_XY(uv, width, height, is_clamped=False):
    """
    Convert the given UV to XY coordinates.
    uv is defined in terms of GPU UV space.

    If is_clamped, the result is clamped to [0, width-1] x [0, height-1].

    Fix: the clamped branch used to return a plain tuple while the
    unclamped branch returned an XY record, so attribute access (.x/.y)
    failed on clamped results; both branches now return XY.
    """
    # s*width - 0.5; t*height - 0.5
    xy = XY(x=uv.u * width - 0.5, y=uv.v * height - 0.5)
    if is_clamped:
        xy = XY(
            x=numpy.clip(xy.x, 0, max(0, width - 1)),
            y=numpy.clip(xy.y, 0, max(0, height - 1)))
    return xy
def UVs_to_XYs(uvEdges, width, height):
    """Convert every vertex of every UV edge to XY texture space."""
    converted = []
    for edge in uvEdges:
        for vert in edge:
            converted.append(UV_to_XY(vert, width, height))
    return converted
def globalUV_to_local(uv, minX, minY, width, height):
    """
    Convert from a texture's global UV to local UV.
    Local pixel values defined by the minimum x and y values.
    uv is defined in terms of GPU UV space.
    """
    px, py = UV_to_XY(uv, width, height, True)
    return UV(u=px - minX, v=py - minY)
def globalEdge_to_local(uv0, uv1, minI, width, height):
    """
    Convert a edge from a texture's global UV to local UV.
    Local pixel values defined by the minimum x and y values, which are
    decoded from the flat index minI.
    uv is defined in terms of GPU UV space.
    """
    # Decode the row-major flat index into pixel coordinates.
    min_x = minI % width
    min_y = minI // width
    return [
        globalUV_to_local(uv0, min_x, min_y, width, height),
        globalUV_to_local(uv1, min_x, min_y, width, height),
    ]
def surrounding_pixels(uv, w, h, as_index=False, as_tuple=False):
    """
    Find the four pixels surrounding the point (u, v), given in GPU UV
    space, in a w x h texture.

    Order: (lower-left, lower-right, upper-left, upper-right).
    Output format flags (at most one may be set):
        as_index -- return flat row-major 1-D indices instead of Pixels.
        as_tuple -- return plain (x, y) tuples instead of Pixels.
    """
    assert not (as_index and as_tuple)
    # GPU UV -> XY coordinates, clamped into the texture.
    x, y = UV_to_XY(uv, w, h, is_clamped=True)
    # Lower-left corner, kept one pixel away from the far edge so that
    # the upper-right neighbour below stays in bounds.
    x0 = int(min(max(0, numpy.floor(x)), w - 2))       # X in [0, w-2]
    y0 = int(min(max(0, numpy.floor(y)), h - 2))       # Y in [0, h-2]
    lower_left = Pixel(x=x0, y=y0)
    x1 = int(min(max(0, numpy.floor(x) + 1), w - 1))   # X in [0, w-1]
    y1 = int(min(max(0, numpy.floor(y) + 1), h - 1))   # Y in [0, h-1]
    upper_right = Pixel(x=x1, y=y1)
    # The four surrounding corners in Pixel space.
    corners = (
        lower_left,
        Pixel(x=upper_right.x, y=lower_left.y),
        Pixel(x=lower_left.x, y=upper_right.y),
        upper_right,
    )
    if as_index:
        return [rowcol_to_index(p.y, p.x, w) for p in corners]
    if as_tuple:
        return tuple(tuple(p) for p in corners)
    return corners
def range_min_max(a, b):
    """Return a range running from the smaller value up to the larger."""
    lo, hi = sorted((a, b))
    return range(int(lo), int(hi))
def print_dots(time_delta=1.0):
    """
    Print a progress indicator: one to three dots, redrawn in place every
    time_delta seconds, cycling forever.

    Output only appears when the root logger's effective level is INFO or
    lower. Runs until the enclosing thread/process is stopped — intended
    to be driven from a background (daemon) thread.

    Parameters:
        time_delta (float): seconds to wait between updates.
    """
    dot_count = 0
    while True:
        if logging.getLogger().getEffectiveLevel() <= logging.INFO:
            dot_count = (dot_count % 3) + 1
            # Trailing spaces erase leftover dots; "\r" rewinds the line.
            print(("." * dot_count) + (" " * 3), end="\r")
            sys.stdout.flush()
        # Sleep unconditionally: the original only slept when logging was
        # enabled, so at quieter log levels the loop busy-spun the CPU.
        time.sleep(time_delta)
def verts_equal(v0, v1, epsilon=1e-8):
    """
    Approximate equality test for two vertices: every component pair must
    agree within epsilon.
    WARNING: slower than ==, but tolerant of floating-point noise.
    """
    assert epsilon >= 0.0
    # Different lengths can never be equal.
    if len(v0) != len(v1):
        return False
    return all(abs(a - b) <= epsilon for a, b in zip(v0, v1))
def normalize_array(arr):
    """Rescale the given array linearly so its values span [0, 1]."""
    lo = numpy.amin(arr)
    hi = numpy.amax(arr)
    return (arr - lo) / float(hi - lo)
def is_counterclockwise(v0, v1, v2):
    """
    Determine whether the triangle (v0, v1, v2) is wound in
    counter-clockwise order.
    Input:
        v0, v1, v2 - 2D coordinates for the vertices of the triangle
    Output:
        Returns True if the triangle is counter-clockwise order.
    """
    # Sign of the determinant of the homogeneous-coordinate matrix gives
    # the winding direction.
    rows = [[1, p[0], p[1]] for p in (v0, v1, v2)]
    return numpy.linalg.det(numpy.array(rows)) > 0
# Convert back to image format
def to_uint8(data, normalize=False):
    """
    Convert floating-point image data in [0, 1] to unsigned 8-bit values.

    When *normalize* is set, the data is first rescaled to span [0, 1].
    Out-of-range values are clipped in place before conversion.
    """
    if normalize:
        data = normalize_array(data)
    # Clamp every element into the displayable [0, 1] range (in place,
    # matching the original per-row clipping behavior).
    numpy.clip(data, 0.0, 1.0, out=data)
    return (data * 255).round().astype("uint8")
def save_ijvs(A, fname):
    """
    Write a sparse matrix to *fname* as "i j value" triplets.

    The first line holds the matrix dimensions ("height width"); every
    following line is one nonzero entry at full double precision.
    """
    coo = A.tocoo()
    n_rows, n_cols = coo.shape
    with open(fname, "w") as out:
        out.write("%d %d\n" % (n_rows, n_cols))
        # Stream the triplets directly instead of staging them in memory.
        for i, j, v in zip(coo.row, coo.col, coo.data):
            out.write("%d %d %.17f\n" % (i, j, v))
def save_dense(A, fname):
    """Write a dense 2-D array to *fname*, one space-separated row per line."""
    # Unpack to assert A is 2-D, mirroring the original's shape check.
    n_rows, n_cols = A.shape
    with open(fname, "w") as out:
        for row in A:
            out.write("".join("%.17f " % val for val in row))
            out.write("\n")
| [
"recordclass.recordclass",
"numpy.amin",
"numpy.empty",
"numpy.floor",
"time.sleep",
"numpy.amax",
"numpy.array",
"sys.stdout.flush",
"itertools.tee",
"numpy.linalg.det",
"logging.getLogger"
] | [((350, 379), 'recordclass.recordclass', 'recordclass', (['"""UV"""', "['u', 'v']"], {}), "('UV', ['u', 'v'])\n", (361, 379), False, 'from recordclass import recordclass\n'), ((388, 420), 'recordclass.recordclass', 'recordclass', (['"""Pixel"""', "['x', 'y']"], {}), "('Pixel', ['x', 'y'])\n", (399, 420), False, 'from recordclass import recordclass\n'), ((426, 455), 'recordclass.recordclass', 'recordclass', (['"""XY"""', "['x', 'y']"], {}), "('XY', ['x', 'y'])\n", (437, 455), False, 'from recordclass import recordclass\n'), ((462, 497), 'recordclass.recordclass', 'recordclass', (['"""XYZ"""', "['x', 'y', 'z']"], {}), "('XYZ', ['x', 'y', 'z'])\n", (473, 497), False, 'from recordclass import recordclass\n'), ((567, 614), 'recordclass.recordclass', 'recordclass', (['"""QuadraticEnergy"""', "['Q', 'L', 'C']"], {}), "('QuadraticEnergy', ['Q', 'L', 'C'])\n", (578, 614), False, 'from recordclass import recordclass\n'), ((708, 731), 'itertools.tee', 'itertools.tee', (['iterable'], {}), '(iterable)\n', (721, 731), False, 'import itertools\n'), ((5246, 5261), 'numpy.amin', 'numpy.amin', (['arr'], {}), '(arr)\n', (5256, 5261), False, 'import numpy\n'), ((5275, 5290), 'numpy.amax', 'numpy.amax', (['arr'], {}), '(arr)\n', (5285, 5290), False, 'import numpy\n'), ((5657, 5709), 'numpy.array', 'numpy.array', (['[[1, v[0], v[1]] for v in (v0, v1, v2)]'], {}), '([[1, v[0], v[1]] for v in (v0, v1, v2)])\n', (5668, 5709), False, 'import numpy\n'), ((6262, 6294), 'numpy.empty', 'numpy.empty', (['(A.row.shape[0], 3)'], {}), '((A.row.shape[0], 3))\n', (6273, 6294), False, 'import numpy\n'), ((4728, 4750), 'time.sleep', 'time.sleep', (['time_delta'], {}), '(time_delta)\n', (4738, 4750), False, 'import time\n'), ((5721, 5742), 'numpy.linalg.det', 'numpy.linalg.det', (['mat'], {}), '(mat)\n', (5737, 5742), False, 'import numpy\n'), ((4701, 4719), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4717, 4719), False, 'import sys\n'), ((3561, 3575), 'numpy.floor', 'numpy.floor', 
(['x'], {}), '(x)\n', (3572, 3575), False, 'import numpy\n'), ((3631, 3645), 'numpy.floor', 'numpy.floor', (['y'], {}), '(y)\n', (3642, 3645), False, 'import numpy\n'), ((3731, 3745), 'numpy.floor', 'numpy.floor', (['x'], {}), '(x)\n', (3742, 3745), False, 'import numpy\n'), ((3805, 3819), 'numpy.floor', 'numpy.floor', (['y'], {}), '(y)\n', (3816, 3819), False, 'import numpy\n'), ((4529, 4548), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (4546, 4548), False, 'import logging\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.